file_path (string, length 22-162) | content (string, length 19-501k) | size (int64, 19-501k) | lang (1 class: Python) | avg_line_length (float64, 6.33-100) | max_line_length (int64, 18-935) | alphanum_fraction (float64, 0.34-0.93)
---|---|---|---|---|---|---
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_half.py | import platform
import pytest
import numpy as np
from numpy import uint16, float16, float32, float64
from numpy.testing import assert_, assert_equal
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except FloatingPointError as exc:
assert_(str(exc).find(strmatch) >= 0,
"Did not raise floating point %s error" % strmatch)
else:
assert_(False,
"Did not raise floating point %s error" % strmatch)
class TestHalf:
def setup_method(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
self.all_f32 = np.array(self.all_f16, dtype=float32)
self.all_f64 = np.array(self.all_f16, dtype=float64)
# An array of all non-NaN float16 values, in sorted order
self.nonan_f16 = np.concatenate(
(np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
self.nonan_f16.dtype = float16
self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
# An array of all finite float16 values, in sorted order
self.finite_f16 = self.nonan_f16[1:-1]
self.finite_f32 = self.nonan_f32[1:-1]
self.finite_f64 = self.nonan_f64[1:-1]
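    # Aside (not part of the upstream file): viewing the uint16 range through
    # a float16 dtype enumerates every possible bit pattern, and the sorted
    # non-NaN ordering works because negative patterns (0xfc00 down to 0x8000)
    # grow in value as the pattern decreases, while positive patterns
    # (0x0000 up to 0x7c00) grow in value as the pattern increases.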
def test_half_conversions(self):
"""Checks that all 16-bit values survive conversion
to/from 32-bit and 64-bit float"""
# Because the underlying routines preserve the NaN bits, every
# value is preserved when converting to/from other floats.
# Convert from float32 back to float16
b = np.array(self.all_f32, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert from float64 back to float16
b = np.array(self.all_f64, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert float16 to longdouble and back
# This doesn't necessarily preserve the extra NaN bits,
# so exclude NaNs.
a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
b = np.array(a_ld, dtype=float16)
assert_equal(self.nonan_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Check the range for which all integers can be represented
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
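    # Aside (not part of the upstream file): the +/-2048 bound follows from
    # float16 having an 11-bit significand (10 stored bits plus the implicit
    # leading bit), so every integer of magnitude <= 2**11 = 2048 is exactly
    # representable, while e.g. 2049 already rounds:
    #   >>> int(np.float16(2049))   # -> 2048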
@pytest.mark.parametrize("string_dt", ["S", "U"])
def test_half_conversion_to_string(self, string_dt):
# Currently uses S/U32 (which is sufficient for float32)
expected_dt = np.dtype(f"{string_dt}32")
assert np.promote_types(np.float16, string_dt) == expected_dt
assert np.promote_types(string_dt, np.float16) == expected_dt
arr = np.ones(3, dtype=np.float16).astype(string_dt)
assert arr.dtype == expected_dt
@pytest.mark.parametrize("string_dt", ["S", "U"])
def test_half_conversion_from_string(self, string_dt):
string = np.array("3.1416", dtype=string_dt)
assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16)
@pytest.mark.parametrize("offset", [None, "up", "down"])
@pytest.mark.parametrize("shift", [None, "up", "down"])
@pytest.mark.parametrize("float_t", [np.float32, np.float64])
def test_half_conversion_rounding(self, float_t, shift, offset):
# Assumes that round to even is used during casting.
max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
        # Test all (positive) finite numbers; denormals are the most
        # interesting, however:
f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
f16s_float = f16s_patterns.view(np.float16).astype(float_t)
        # Shift the values by half a bit up or down (or do not shift),
if shift == "up":
f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
elif shift == "down":
f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
else:
f16s_float = f16s_float[1:-1]
# Increase the float by a minimal value:
if offset == "up":
f16s_float = np.nextafter(f16s_float, float_t(1e50))
elif offset == "down":
f16s_float = np.nextafter(f16s_float, float_t(-1e50))
# Convert back to float16 and its bit pattern:
res_patterns = f16s_float.astype(np.float16).view(np.uint16)
        # The above calculation tries the original values, or the exact
        # midpoints between the float16 values, and then offsets them by as
        # little as possible. If no offset occurs, "round to even" logic is
        # necessary; an arbitrarily small offset should always cause normal
        # up/down rounding.
# Calculate the expected pattern:
cmp_patterns = f16s_patterns[1:-1].copy()
if shift == "down" and offset != "up":
shift_pattern = -1
elif shift == "up" and offset != "down":
shift_pattern = 1
else:
            # There cannot be a shift: either shift is None, so all rounding
            # goes back to the original value, or the shift is undone by the
            # offset.
shift_pattern = 0
# If rounding occurs, is it normal rounding or round to even?
if offset is None:
            # Round to even occurs; modify only non-even patterns (the int16
            # view allows adding -1):
cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
else:
cmp_patterns.view(np.int16)[...] += shift_pattern
assert_equal(res_patterns, cmp_patterns)
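    # Aside (a minimal sketch, not part of the upstream file): "round to even"
    # at an exact midpoint picks the neighbour with the even significand, so
    # the float32 value exactly halfway between float16(1.0) (pattern 0x3c00)
    # and its successor (0x3c01) rounds down to the even pattern:
    #   >>> np.float16(np.float32(1.0 + 2.0**-11)).view(np.uint16)  # -> 0x3c00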
@pytest.mark.parametrize(["float_t", "uint_t", "bits"],
[(np.float32, np.uint32, 23),
(np.float64, np.uint64, 52)])
def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
        # Test specifically that all bits are considered when deciding
        # whether round to even should occur (i.e. no bits are lost at the
        # end). Compare also gh-12721. The most bits can get lost for the
        # smallest denormal:
smallest_value = np.uint16(1).view(np.float16).astype(float_t)
assert smallest_value == 2**-24
# Will be rounded to zero based on round to even rule:
rounded_to_zero = smallest_value / float_t(2)
assert rounded_to_zero.astype(np.float16) == 0
        # The significand will be all 0 for the float_t; test that we do not
        # lose any of its low bits:
for i in range(bits):
# slightly increasing the value should make it round up:
larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
larger_value = larger_pattern.view(float_t)
assert larger_value.astype(np.float16) == smallest_value
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
assert_equal(np.spacing(float16(65504)), np.inf)
# Check comparisons of all values with NaN
nan = float16(np.nan)
assert_(not (self.all_f16 == nan).any())
assert_(not (nan == self.all_f16).any())
assert_((self.all_f16 != nan).all())
assert_((nan != self.all_f16).all())
assert_(not (self.all_f16 < nan).any())
assert_(not (nan < self.all_f16).any())
assert_(not (self.all_f16 <= nan).any())
assert_(not (nan <= self.all_f16).any())
assert_(not (self.all_f16 > nan).any())
assert_(not (nan > self.all_f16).any())
assert_(not (self.all_f16 >= nan).any())
assert_(not (nan >= self.all_f16).any())
def test_half_values(self):
"""Confirms a small number of known half values"""
a = np.array([1.0, -1.0,
2.0, -2.0,
0.0999755859375, 0.333251953125, # 1/10, 1/3
65504, -65504, # Maximum magnitude
2.0**(-14), -2.0**(-14), # Minimum normal
2.0**(-24), -2.0**(-24), # Minimum subnormal
0, -1/1e1000, # Signed zeros
np.inf, -np.inf])
b = np.array([0x3c00, 0xbc00,
0x4000, 0xc000,
0x2e66, 0x3555,
0x7bff, 0xfbff,
0x0400, 0x8400,
0x0001, 0x8001,
0x0000, 0x8000,
0x7c00, 0xfc00], dtype=uint16)
b.dtype = float16
assert_equal(a, b)
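    # Illustrative aside (a minimal sketch, not part of the upstream suite):
    # the bit patterns above decode by hand into sign, exponent and
    # significand fields, mirroring the manual conversion used in
    # test_half_correctness below.
    @staticmethod
    def _decode_half_example(bits=0x3555):
        sign = (-1.0) ** ((bits & 0x8000) >> 15)
        exp = ((bits & 0x7c00) >> 10) - 15
        man = (bits & 0x03ff) * 2.0 ** -10
        if exp != -15:
            man += 1.0   # implicit leading bit of normalized values
        else:
            exp = -14    # subnormals share the minimum usable exponent
        return sign * man * 2.0 ** exp   # 0x3555 -> 0.333251953125 (~1/3)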
def test_half_rounding(self):
"""Checks that rounding when converting to half is correct"""
a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
2.0**-25, # Underflows to zero (nearest even mode)
2.0**-26, # Underflows to zero
1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
1.0+2.0**-12, # rounds to 1.0
65519, # rounds to 65504
65520], # rounds to inf
dtype=float64)
rounded = [2.0**-24,
0.0,
0.0,
1.0+2.0**(-10),
1.0,
1.0,
65504,
np.inf]
# Check float64->float16 rounding
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
# Check float32->float16 rounding
a = np.array(a, dtype=float32)
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
def test_half_correctness(self):
"""Take every finite float16, and check the casting functions with
a manual conversion."""
# Create an array of all finite float16s
a_bits = self.finite_f16.view(dtype=uint16)
# Convert to 64-bit float manually
a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
a_man = (a_bits & 0x03ff) * 2.0**(-10)
# Implicit bit of normalized floats
a_man[a_exp != -15] += 1
# Denormalized exponent is -14
a_exp[a_exp == -15] = -14
a_manual = a_sgn * a_man * 2.0**a_exp
a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
if len(a32_fail) != 0:
bad_index = a32_fail[0]
assert_equal(self.finite_f32, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f32[bad_index],
a_manual[bad_index]))
a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
if len(a64_fail) != 0:
bad_index = a64_fail[0]
assert_equal(self.finite_f64, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f64[bad_index],
a_manual[bad_index]))
def test_half_ordering(self):
"""Make sure comparisons are working right"""
# All non-NaN float16 values in reverse order
a = self.nonan_f16[::-1].copy()
# 32-bit float copy
b = np.array(a, dtype=float32)
# Should sort the same
a.sort()
b.sort()
assert_equal(a, b)
# Comparisons should work
assert_((a[:-1] <= a[1:]).all())
assert_(not (a[:-1] > a[1:]).any())
assert_((a[1:] >= a[:-1]).all())
assert_(not (a[1:] < a[:-1]).any())
# All != except for +/-0
assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
def test_half_funcs(self):
"""Test the various ArrFuncs"""
# fill
assert_equal(np.arange(10, dtype=float16),
np.arange(10, dtype=float32))
# fillwithscalar
a = np.zeros((5,), dtype=float16)
a.fill(1)
assert_equal(a, np.ones((5,), dtype=float16))
# nonzero and copyswap
a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
assert_equal(a.nonzero()[0],
[2, 5, 6])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0],
[2, 5, 6])
# dot
a = np.arange(0, 10, 0.5, dtype=float16)
b = np.ones((20,), dtype=float16)
assert_equal(np.dot(a, b),
95)
# argmax
a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
4)
a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
5)
# getitem
a = np.arange(10, dtype=float16)
for i in range(10):
assert_equal(a.item(i), i)
def test_spacing_nextafter(self):
"""Test np.spacing and np.nextafter"""
# All non-negative finite #'s
a = np.arange(0x7c00, dtype=uint16)
hinf = np.array((np.inf,), dtype=float16)
hnan = np.array((np.nan,), dtype=float16)
a_f16 = a.view(dtype=float16)
assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
assert_equal(np.nextafter(hinf, a_f16), a_f16[-1])
assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1])
assert_equal(np.nextafter(hinf, hinf), hinf)
assert_equal(np.nextafter(hinf, -hinf), a_f16[-1])
assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1])
assert_equal(np.nextafter(-hinf, -hinf), -hinf)
assert_equal(np.nextafter(a_f16, hnan), hnan[0])
assert_equal(np.nextafter(hnan, a_f16), hnan[0])
assert_equal(np.nextafter(hnan, hnan), hnan)
assert_equal(np.nextafter(hinf, hnan), hnan)
assert_equal(np.nextafter(hnan, hinf), hnan)
# switch to negatives
a |= 0x8000
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1])
assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1])
assert_equal(np.nextafter(a_f16, hnan), hnan[0])
assert_equal(np.nextafter(hnan, a_f16), hnan[0])
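    # Aside (not part of the upstream file): np.spacing(x) is the gap between
    # x and the next representable value, i.e. one ULP, e.g.:
    #   >>> np.spacing(np.float16(1.0))   # -> 2**-10, the float16 epsilon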
def test_half_ufuncs(self):
"""Test the various ufuncs"""
a = np.array([0, 1, 2, 4, 2], dtype=float16)
b = np.array([-2, 5, 1, 4, 3], dtype=float16)
c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
assert_equal(np.equal(a, b), [False, False, False, True, False])
assert_equal(np.not_equal(a, b), [True, True, True, False, True])
assert_equal(np.less(a, b), [False, True, False, False, True])
assert_equal(np.less_equal(a, b), [False, True, False, True, True])
assert_equal(np.greater(a, b), [True, False, True, False, False])
assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
assert_equal(np.logical_and(a, b), [False, True, True, True, True])
assert_equal(np.logical_or(a, b), [True, True, True, True, True])
assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
assert_equal(np.logical_not(a), [True, False, False, False, False])
assert_equal(np.isnan(c), [False, False, False, True, False])
assert_equal(np.isinf(c), [False, False, True, False, False])
assert_equal(np.isfinite(c), [True, True, False, False, True])
assert_equal(np.signbit(b), [True, False, False, False, False])
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
x = np.maximum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
x = np.minimum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
assert_equal(np.square(b), [4, 25, 1, 16, 9])
assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
assert_equal(np.conjugate(b), b)
assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
assert_equal(np.negative(b), [2, -5, -1, -4, -3])
assert_equal(np.positive(b), b)
assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def test_half_coercion(self):
"""Test that half gets coerced properly with the other types"""
a16 = np.array((1,), dtype=float16)
a32 = np.array((1,), dtype=float32)
b16 = float16(1)
b32 = float32(1)
assert_equal(np.power(a16, 2).dtype, float16)
assert_equal(np.power(a16, 2.0).dtype, float16)
assert_equal(np.power(a16, b16).dtype, float16)
assert_equal(np.power(a16, b32).dtype, float16)
assert_equal(np.power(a16, a16).dtype, float16)
assert_equal(np.power(a16, a32).dtype, float32)
assert_equal(np.power(b16, 2).dtype, float64)
assert_equal(np.power(b16, 2.0).dtype, float64)
assert_equal(np.power(b16, b16).dtype, float16)
assert_equal(np.power(b16, b32).dtype, float32)
assert_equal(np.power(b16, a16).dtype, float16)
assert_equal(np.power(b16, a32).dtype, float32)
assert_equal(np.power(a32, a16).dtype, float32)
assert_equal(np.power(a32, b16).dtype, float32)
assert_equal(np.power(b32, a16).dtype, float16)
assert_equal(np.power(b32, b16).dtype, float32)
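    # Aside (not part of the upstream file): the mixed float16/float32/float64
    # results above reflect the value-based promotion rules of this NumPy
    # version: a float16 *array* keeps its dtype against Python and NumPy
    # scalar operands, while a float16 *scalar* combined with a Python int or
    # float promotes like a plain Python number, hence float64.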
@pytest.mark.skipif(platform.machine() == "armv5tel",
reason="See gh-413.")
def test_half_fpe(self):
with np.errstate(all='raise'):
sx16 = np.array((1e-4,), dtype=float16)
bx16 = np.array((1e4,), dtype=float16)
sy16 = float16(1e-4)
by16 = float16(1e4)
# Underflow errors
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16)
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16)
assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16)
assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16)
assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16)
assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16)
assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16)
assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16)
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(-2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14+2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(-2.**-14-2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14+2**-23), float16(4))
# Overflow errors
assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16)
assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16)
assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16)
assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16)
assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16)
assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16)
assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16)
assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16)
assert_raises_fpe('overflow', lambda a, b:a+b,
float16(65504), float16(17))
assert_raises_fpe('overflow', lambda a, b:a-b,
float16(-65504), float16(17))
assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
assert_raises_fpe('overflow', np.spacing, float16(65504))
# Invalid value errors
assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.nan))
# These should not raise
float16(65472)+float16(32)
float16(2**-13)/float16(2)
float16(2**-14)/float16(2**10)
np.spacing(float16(-65504))
np.nextafter(float16(65504), float16(-np.inf))
np.nextafter(float16(-65504), float16(np.inf))
np.nextafter(float16(np.inf), float16(0))
np.nextafter(float16(-np.inf), float16(0))
np.nextafter(float16(0), float16(np.nan))
np.nextafter(float16(np.nan), float16(0))
float16(2**-14)/float16(2**10)
float16(-2**-14)/float16(2**10)
float16(2**-14+2**-23)/float16(2)
float16(-2**-14-2**-23)/float16(2)
def test_half_array_interface(self):
"""Test that half is compatible with __array_interface__"""
class Dummy:
pass
a = np.ones((1,), dtype=float16)
b = Dummy()
b.__array_interface__ = a.__array_interface__
c = np.array(b)
assert_(c.dtype == float16)
assert_equal(a, c)
| 23,823 | Python | 41.926126 | 90 | 0.539731 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalarbuffer.py | """
Test that the scalar buffer interface adheres to PEP 3118
"""
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import get_buffer_info
import pytest
from numpy.testing import assert_, assert_equal, assert_raises
# PEP3118 format strings for native (standard alignment and byteorder) types
scalars_and_codes = [
(np.bool_, '?'),
(np.byte, 'b'),
(np.short, 'h'),
(np.intc, 'i'),
(np.int_, 'l'),
(np.longlong, 'q'),
(np.ubyte, 'B'),
(np.ushort, 'H'),
(np.uintc, 'I'),
(np.uint, 'L'),
(np.ulonglong, 'Q'),
(np.half, 'e'),
(np.single, 'f'),
(np.double, 'd'),
(np.longdouble, 'g'),
(np.csingle, 'Zf'),
(np.cdouble, 'Zd'),
(np.clongdouble, 'Zg'),
]
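# Aside (not part of the upstream file): these codes are what memoryview
# reports for the corresponding scalar, e.g.:
#   >>> memoryview(np.half(1.0)).format   # -> 'e'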
scalars_only, codes_only = zip(*scalars_and_codes)
class TestScalarPEP3118:
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_match_array(self, scalar):
x = scalar()
a = np.array([], dtype=np.dtype(scalar))
mv_x = memoryview(x)
mv_a = memoryview(a)
assert_equal(mv_x.format, mv_a.format)
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_dim(self, scalar):
x = scalar()
mv_x = memoryview(x)
assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
assert_equal(mv_x.ndim, 0)
assert_equal(mv_x.shape, ())
assert_equal(mv_x.strides, ())
assert_equal(mv_x.suboffsets, ())
@pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
def test_scalar_code_and_properties(self, scalar, code):
x = scalar()
expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0,
shape=(), format=code, readonly=True)
mv_x = memoryview(x)
assert self._as_dict(mv_x) == expected
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_buffers_readonly(self, scalar):
x = scalar()
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(x, ["WRITABLE"])
def test_void_scalar_structured_data(self):
dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))])
x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
assert_(isinstance(x, np.void))
mv_x = memoryview(x)
expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize
expected_size += 2 * np.dtype(np.float64).itemsize
assert_equal(mv_x.itemsize, expected_size)
assert_equal(mv_x.ndim, 0)
assert_equal(mv_x.shape, ())
assert_equal(mv_x.strides, ())
assert_equal(mv_x.suboffsets, ())
# check scalar format string against ndarray format string
a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_(isinstance(a, np.ndarray))
mv_a = memoryview(a)
assert_equal(mv_x.itemsize, mv_a.itemsize)
assert_equal(mv_x.format, mv_a.format)
# Check that we do not allow writeable buffer export (technically
# we could allow it sometimes here...)
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(x, ["WRITABLE"])
def _as_dict(self, m):
return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
ndim=m.ndim, format=m.format, readonly=m.readonly)
def test_datetime_memoryview(self):
# gh-11656
# Values verified with v1.13.3, shape is not () as in test_scalar_dim
dt1 = np.datetime64('2016-01-01')
dt2 = np.datetime64('2017-01-01')
expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,),
format='B', readonly=True)
v = memoryview(dt1)
assert self._as_dict(v) == expected
v = memoryview(dt2 - dt1)
assert self._as_dict(v) == expected
dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
a = np.empty(1, dt)
# Fails to create a PEP 3118 valid buffer
assert_raises((ValueError, BufferError), memoryview, a[0])
# Check that we do not allow writeable buffer export
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(dt1, ["WRITABLE"])
@pytest.mark.parametrize('s', [
pytest.param("\x32\x32", id="ascii"),
pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
])
def test_str_ucs4(self, s):
s = np.str_(s) # only our subclass implements the buffer protocol
# all the same, characters always encode as ucs4
expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w',
readonly=True)
v = memoryview(s)
assert self._as_dict(v) == expected
        # integers of the platform-appropriate endianness
code_points = np.frombuffer(v, dtype='i4')
assert_equal(code_points, [ord(c) for c in s])
# Check that we do not allow writeable buffer export
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(s, ["WRITABLE"])
def test_user_scalar_fails_buffer(self):
r = rational(1)
with assert_raises(TypeError):
memoryview(r)
# Check that we do not allow writeable buffer export
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(r, ["WRITABLE"])
| 5,588 | Python | 35.292208 | 80 | 0.594488 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_umath_accuracy.py | import numpy as np
import os
from os import path
import sys
import pytest
from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER
from numpy.testing import assert_array_max_ulp
from numpy.testing._private.utils import _glibc_older_than
from numpy.core._multiarray_umath import __cpu_features__
IS_AVX = __cpu_features__.get('AVX512F', False) or \
(__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False))
# Only run on Linux with AVX; also avoid old glibc (numpy/numpy#20448).
runtest = (sys.platform.startswith('linux')
and IS_AVX and not _glibc_older_than("2.17"))
platform_skip = pytest.mark.skipif(not runtest,
reason="avoid testing inconsistent platform "
"library implementations")
# hex-string-to-float conversion taken from:
# https://stackoverflow.com/questions/1592158/convert-hex-to-float
def convert(s, datatype="np.float32"):
i = int(s, 16) # convert from hex to a Python int
if (datatype == "np.float64"):
cp = pointer(c_longlong(i)) # make this into a c long long integer
fp = cast(cp, POINTER(c_double)) # cast the int pointer to a double pointer
else:
cp = pointer(c_int(i)) # make this into a c integer
fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer
return fp.contents.value # dereference the pointer, get the float
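# Aside (a minimal sketch, not part of the upstream file): the same bit
# reinterpretation can be done with the struct module instead of ctypes:
#   >>> import struct
#   >>> struct.unpack('f', struct.pack('I', int('3f800000', 16)))[0]  # -> 1.0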
str_to_float = np.vectorize(convert)
class TestAccuracy:
@platform_skip
def test_validate_transcendentals(self):
with np.errstate(all='ignore'):
data_dir = path.join(path.dirname(__file__), 'data')
files = os.listdir(data_dir)
files = list(filter(lambda f: f.endswith('.csv'), files))
for filename in files:
filepath = path.join(data_dir, filename)
with open(filepath) as fid:
file_without_comments = (r for r in fid if not r[0] in ('$', '#'))
data = np.genfromtxt(file_without_comments,
dtype=('|S39','|S39','|S39',int),
names=('type','input','output','ulperr'),
delimiter=',',
skip_header=1)
npname = path.splitext(filename)[0].split('-')[3]
npfunc = getattr(np, npname)
for datatype in np.unique(data['type']):
data_subset = data[data['type'] == datatype]
inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
perm = np.random.permutation(len(inval))
inval = inval[perm]
outval = outval[perm]
maxulperr = data_subset['ulperr'].max()
assert_array_max_ulp(npfunc(inval), outval, maxulperr)
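    # Aside (not part of the upstream file): an "ulp" is one unit in the last
    # place. assert_array_max_ulp(actual, desired, maxulp) passes when every
    # element differs by at most maxulp representable floats; for example,
    # np.nextafter(np.float32(1.0), np.float32(2.0)) is exactly 1 ulp above 1.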
| 3,229 | Python | 51.096773 | 145 | 0.557138 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_arrayprint.py | import sys
import gc
from hypothesis import given
from hypothesis.extra import numpy as hynp
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
assert_raises_regex,
)
import textwrap
class TestArrayRepr:
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([nan, inf])')
def test_subclass(self):
class sub(np.ndarray): pass
# one dimensional
x1d = np.array([1, 2]).view(sub)
assert_equal(repr(x1d), 'sub([1, 2])')
# two dimensional
x2d = np.array([[1, 2], [3, 4]]).view(sub)
assert_equal(repr(x2d),
'sub([[1, 2],\n'
' [3, 4]])')
# two dimensional with flexible dtype
xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
assert_equal(repr(xstruct),
"sub([[(1,), (1,)],\n"
" [(1,), (1,)]], dtype=[('a', '<i4')])"
)
@pytest.mark.xfail(reason="See gh-10544")
def test_object_subclass(self):
class sub(np.ndarray):
def __new__(cls, inp):
obj = np.asarray(inp).view(cls)
return obj
def __getitem__(self, ind):
ret = super().__getitem__(ind)
return sub(ret)
# test that object + subclass is OK:
x = sub([None, None])
assert_equal(repr(x), 'sub([None, None], dtype=object)')
assert_equal(str(x), '[None None]')
x = sub([None, sub([None, None])])
assert_equal(repr(x),
'sub([None, sub([None, None], dtype=object)], dtype=object)')
assert_equal(str(x), '[None sub([None, None], dtype=object)]')
def test_0d_object_subclass(self):
# make sure that subclasses which return 0ds instead
# of scalars don't cause infinite recursion in str
class sub(np.ndarray):
def __new__(cls, inp):
obj = np.asarray(inp).view(cls)
return obj
def __getitem__(self, ind):
ret = super().__getitem__(ind)
return sub(ret)
x = sub(1)
assert_equal(repr(x), 'sub(1)')
assert_equal(str(x), '1')
x = sub([1, 1])
assert_equal(repr(x), 'sub([1, 1])')
assert_equal(str(x), '[1 1]')
# check it works properly with object arrays too
x = sub(None)
assert_equal(repr(x), 'sub(None, dtype=object)')
assert_equal(str(x), 'None')
# plus recursive object arrays (even depth > 1)
y = sub(None)
x[()] = y
y[()] = x
assert_equal(repr(x),
'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
assert_equal(str(x), '...')
x[()] = 0 # resolve circular references for garbage collector
# nested 0d-subclass-object
x = sub(None)
x[()] = sub(None)
assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
assert_equal(str(x), 'None')
# gh-10663
class DuckCounter(np.ndarray):
def __getitem__(self, item):
result = super().__getitem__(item)
if not isinstance(result, DuckCounter):
result = result[...].view(DuckCounter)
return result
def to_string(self):
return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
def __str__(self):
if self.shape == ():
return self.to_string()
else:
fmt = {'all': lambda x: x.to_string()}
return np.array2string(self, formatter=fmt)
dc = np.arange(5).view(DuckCounter)
assert_equal(str(dc), "[zero one two many many]")
assert_equal(str(dc[0]), "zero")
def test_self_containing(self):
arr0d = np.array(None)
arr0d[()] = arr0d
assert_equal(repr(arr0d),
'array(array(..., dtype=object), dtype=object)')
arr0d[()] = 0 # resolve recursion for garbage collector
arr1d = np.array([None, None])
arr1d[1] = arr1d
assert_equal(repr(arr1d),
'array([None, array(..., dtype=object)], dtype=object)')
arr1d[1] = 0 # resolve recursion for garbage collector
first = np.array(None)
second = np.array(None)
first[()] = second
second[()] = first
assert_equal(repr(first),
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
first[()] = 0 # resolve circular references for garbage collector
def test_containing_list(self):
        # printing square brackets directly would be ambiguous
arr1d = np.array([None, None])
arr1d[0] = [1, 2]
arr1d[1] = [3]
assert_equal(repr(arr1d),
'array([list([1, 2]), list([3])], dtype=object)')
def test_void_scalar_recursion(self):
# gh-9345
repr(np.void(b'test')) # RecursionError ?
def test_fieldless_structured(self):
# gh-10366
no_fields = np.dtype([])
arr_no_fields = np.empty(4, dtype=no_fields)
assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
class TestComplexArray:
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
'[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
'[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
'[0.+infj]', '[0.+infj]', '[0.+infj]',
'[0.-infj]', '[0.-infj]', '[0.-infj]',
'[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
'[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
'[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
'[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
'[1.+infj]', '[1.+infj]', '[1.+infj]',
'[1.-infj]', '[1.-infj]', '[1.-infj]',
'[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
'[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
'[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
'[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
'[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
'[inf+infj]', '[inf+infj]', '[inf+infj]',
'[inf-infj]', '[inf-infj]', '[inf-infj]',
'[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
'[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
'[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
'[nan+infj]', '[nan+infj]', '[nan+infj]',
'[nan-infj]', '[nan-infj]', '[nan-infj]',
'[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
for res, val in zip(actual, wanted):
assert_equal(res, val)
class TestArray2String:
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
def test_unexpected_kwarg(self):
        # ensure that an appropriate TypeError
# is raised when array2string receives
# an unexpected kwarg
with assert_raises_regex(TypeError, 'nonsense'):
np.array2string(np.array([1, 2, 3]),
nonsense=None)
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
assert_(np.array2string(x, formatter={'all':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
x_hex)
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
"[('Sarah', [8., 7.]) ('John', [6., 7.])]")
np.set_printoptions(legacy='1.13')
try:
# for issue #5692
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
('NaT',) ('NaT',) ('NaT',)]""")
)
finally:
np.set_printoptions(legacy=False)
# same again, but with non-legacy behavior
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ( 'NaT',)
( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',)]""")
)
# and again, with timedeltas
A = np.full(10, 123456, dtype=[("A", "m8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
)
# See #8160
struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
assert_equal(np.array2string(struct_int),
"[([ 1, -1],) ([123, 1],)]")
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
dtype=[('B', 'i4', (2, 2))])
assert_equal(np.array2string(struct_2dint),
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
def test_unstructured_void_repr(self):
a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
assert_equal(repr(a),
r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
assert_equal(eval(repr(a), vars(np)), a)
assert_equal(eval(repr(a[0]), vars(np)), a[0])
def test_edgeitems_kwarg(self):
        # previously the global print options would override the kwarg
arr = np.zeros(3, int)
assert_equal(
np.array2string(arr, edgeitems=1, threshold=0),
"[0 ... 0]"
)
def test_summarize_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ... 998 999 1000]'
assert_equal(str(A), strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_equal(repr(A), reprA)
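    # Aside (not part of the upstream file): summarization kicks in once the
    # array size exceeds the `threshold` print option (1000 by default), and
    # then only `edgeitems` entries (3 by default) are shown at each end,
    # hence the 1001- and 1002-element arrays in these two tests.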
def test_summarize_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ... 498 499 500]\n' \
' [ 501 502 503 ... 999 1000 1001]]'
assert_equal(str(A), strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_equal(repr(A), reprA)
def test_linewidth(self):
a = np.full(6, 1)
def make_str(a, width, **kw):
return np.array2string(a, separator="", max_line_width=width, **kw)
assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
' 11]')
assert_equal(make_str(a, 8), '[111111]')
assert_equal(make_str(a, 7), '[11111\n'
' 1]')
assert_equal(make_str(a, 5), '[111\n'
' 111]')
b = a[None,None,:]
assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
' 1]]]')
assert_equal(make_str(b, 12), '[[[111111]]]')
assert_equal(make_str(b, 9), '[[[111\n'
' 111]]]')
assert_equal(make_str(b, 8), '[[[11\n'
' 11\n'
' 11]]]')
def test_wide_element(self):
a = np.array(['xxxxx'])
assert_equal(
np.array2string(a, max_line_width=5),
"['xxxxx']"
)
assert_equal(
np.array2string(a, max_line_width=5, legacy='1.13'),
"[ 'xxxxx']"
)
def test_multiline_repr(self):
class MultiLine:
def __repr__(self):
return "Line 1\nLine 2"
a = np.array([[None, MultiLine()], [MultiLine(), None]])
assert_equal(
np.array2string(a),
'[[None Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2 None]]'
)
assert_equal(
np.array2string(a, max_line_width=5),
'[[None\n'
' Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2\n'
' None]]'
)
assert_equal(
repr(a),
'array([[None, Line 1\n'
' Line 2],\n'
' [Line 1\n'
' Line 2, None]], dtype=object)'
)
class MultiLineLong:
def __repr__(self):
return "Line 1\nLooooooooooongestLine2\nLongerLine 3"
a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
assert_equal(
repr(a),
'array([[None, Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 , None]], dtype=object)'
)
assert_equal(
np.array_repr(a, 20),
'array([[None,\n'
' Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ,\n'
' None]],\n'
' dtype=object)'
)
def test_nested_array_repr(self):
a = np.empty((2, 2), dtype=object)
a[0, 0] = np.eye(2)
a[0, 1] = np.eye(3)
a[1, 0] = None
a[1, 1] = np.ones((3, 1))
assert_equal(
repr(a),
'array([[array([[1., 0.],\n'
' [0., 1.]]), array([[1., 0., 0.],\n'
' [0., 1., 0.],\n'
' [0., 0., 1.]])],\n'
' [None, array([[1.],\n'
' [1.],\n'
' [1.]])]], dtype=object)'
)
@given(hynp.from_dtype(np.dtype("U")))
def test_any_text(self, text):
# This test checks that, given any value that can be represented in an
# array of dtype("U") (i.e. unicode string), ...
a = np.array([text, text, text])
# casting a list of them to an array does not e.g. truncate the value
assert_equal(a[0], text)
# and that np.array2string puts a newline in the expected location
expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
assert_equal(result, expected_repr)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount(self):
# make sure we do not hold references to the array due to a recursive
# closure (gh-10620)
gc.disable()
a = np.arange(2)
r1 = sys.getrefcount(a)
np.array2string(a)
np.array2string(a)
r2 = sys.getrefcount(a)
gc.collect()
gc.enable()
assert_(r1 == r2)
class TestPrintOptions:
"""Test getting and setting global print options."""
def setup_method(self):
self.oldopts = np.get_printoptions()
def teardown_method(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
def test_precision_zero(self):
np.set_printoptions(precision=0)
for values, string in (
([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
x = np.array(values)
assert_equal(repr(x), "array([%s])" % string)
def test_formatter(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
def test_formatter_reset(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'all':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int_kind':None})
assert_equal(repr(x), "array([0, 1, 2])")
x = np.arange(3.)
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([0., 1., 2.])")
def test_0d_arrays(self):
assert_equal(str(np.array(u'café', '<U4')), u'café')
assert_equal(repr(np.array('café', '<U4')),
"array('café', dtype='<U4')")
assert_equal(str(np.array('test', np.str_)), 'test')
a = np.zeros(1, dtype=[('a', '<i4', (3,))])
assert_equal(str(a[0]), '([0, 0, 0],)')
assert_equal(repr(np.datetime64('2005-02-25')[...]),
"array('2005-02-25', dtype='datetime64[D]')")
assert_equal(repr(np.timedelta64('10', 'Y')[...]),
"array(10, dtype='timedelta64[Y]')")
# repr of 0d arrays is affected by printoptions
x = np.array(1)
np.set_printoptions(formatter={'all':lambda x: "test"})
assert_equal(repr(x), "array(test)")
# str is unaffected
assert_equal(str(x), "1")
# check `style` arg raises
assert_warns(DeprecationWarning, np.array2string,
np.array(1.), style=repr)
# but not in legacy mode
np.array2string(np.array(1.), style=repr, legacy='1.13')
# gh-10934 style was broken in legacy mode, check it works
np.array2string(np.array(1.), legacy='1.13')
def test_float_spacing(self):
x = np.array([1., 2., 3.])
y = np.array([1., 2., -10.])
z = np.array([100., 2., -1.])
w = np.array([-100., 2., 1.])
assert_equal(repr(x), 'array([1., 2., 3.])')
assert_equal(repr(y), 'array([ 1., 2., -10.])')
assert_equal(repr(np.array(y[0])), 'array(1.)')
assert_equal(repr(np.array(y[-1])), 'array(-10.)')
assert_equal(repr(z), 'array([100., 2., -1.])')
assert_equal(repr(w), 'array([-100., 2., 1.])')
assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
x = np.array([np.inf, 100000, 1.1234])
y = np.array([np.inf, 100000, -1.1234])
z = np.array([np.inf, 1.1234, -1e120])
np.set_printoptions(precision=2)
assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
def test_bool_spacing(self):
assert_equal(repr(np.array([True, True])),
'array([ True, True])')
assert_equal(repr(np.array([True, False])),
'array([ True, False])')
assert_equal(repr(np.array([True])),
'array([ True])')
assert_equal(repr(np.array(True)),
'array(True)')
assert_equal(repr(np.array(False)),
'array(False)')
def test_sign_spacing(self):
a = np.arange(4.)
b = np.array([1.234e9])
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array(1.)')
assert_equal(repr(b), 'array([1.234e+09])')
assert_equal(repr(np.array([0.])), 'array([0.])')
assert_equal(repr(c),
"array([1. +1.j , 1.12345679+1.12345679j])")
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign=' ')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array( 1.)')
assert_equal(repr(b), 'array([ 1.234e+09])')
assert_equal(repr(c),
"array([ 1. +1.j , 1.12345679+1.12345679j])")
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign='+')
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
assert_equal(repr(np.array(1.)), 'array(+1.)')
assert_equal(repr(b), 'array([+1.234e+09])')
assert_equal(repr(c),
"array([+1. +1.j , +1.12345679+1.12345679j])")
np.set_printoptions(legacy='1.13')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(b), 'array([ 1.23400000e+09])')
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
assert_equal(repr(np.array(1.)), 'array(1.0)')
assert_equal(repr(np.array([0.])), 'array([ 0.])')
assert_equal(repr(c),
"array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
# gh-10383
assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
def test_float_overflow_nowarn(self):
# make sure internal computations in FloatingFormat don't
# warn about overflow
repr(np.array([1e4, 0.1], dtype='f2'))
def test_sign_spacing_structured(self):
a = np.ones(2, dtype='<f,<f')
assert_equal(repr(a),
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
assert_equal(repr(a[0]), "(1., 1.)")
def test_floatmode(self):
x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
y = np.array([0.2918820979355541, 0.5064172631089138,
0.2848750619642916, 0.4342965294660567,
0.7326538397312751, 0.3459503329096204,
0.0862072768214508, 0.39112753029631175],
dtype=np.float64)
z = np.arange(6, dtype=np.float16)/10
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
        # also make sure 1e23 is right (it lies between two fp numbers)
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
# note: we construct w from the strings `1eXX` instead of doing
# `10.**arange(24)` because it turns out the two are not equivalent in
        # Python. On some architectures `1e23 != 10.**23`.
wp = np.array([1.234e1, 1e2, 1e123])
# unique mode
np.set_printoptions(floatmode='unique')
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
" 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
" 0.0862072768214508 , 0.39112753029631175])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w),
"array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
" 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
" 1.e+24])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1. +1.j , 1.123456789+1.123456789j])")
# maxprec mode, precision=8
np.set_printoptions(floatmode='maxprec', precision=8)
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1. +1.j , 1.12345679+1.12345679j])")
# fixed mode, precision=4
np.set_printoptions(floatmode='fixed', precision=4)
assert_equal(repr(x),
"array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
" 0.2383, 0.4226], dtype=float16)")
assert_equal(repr(y),
"array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
assert_equal(repr(z),
"array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
assert_equal(repr(c),
"array([1.0000+1.0000j, 1.1235+1.1235j])")
# for larger precision, representation error becomes more apparent:
np.set_printoptions(floatmode='fixed', precision=8)
assert_equal(repr(z),
"array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
" 0.50000000], dtype=float16)")
# maxprec_equal mode, precision=8
np.set_printoptions(floatmode='maxprec_equal', precision=8)
assert_equal(repr(x),
"array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
" 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
assert_equal(repr(y),
"array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
# test unique special case (gh-18609)
a = np.float64.fromhex('-1p-97')
assert_equal(np.float64(np.array2string(a, floatmode='unique')), a)
def test_legacy_mode_scalars(self):
        # in legacy mode, str() of floats gets truncated, and complex scalars
# use * for non-finite imaginary part
np.set_printoptions(legacy='1.13')
assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
np.set_printoptions(legacy=False)
assert_equal(str(np.float64(1.123456789123456789)),
'1.1234567891234568')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
def test_legacy_stray_comma(self):
np.set_printoptions(legacy='1.13')
assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
np.set_printoptions(legacy=False)
assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
def test_dtype_linewidth_wrapping(self):
np.set_printoptions(linewidth=75)
assert_equal(repr(np.arange(10,20., dtype='f4')),
"array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
dtype=float32)"""))
styp = '<U4'
assert_equal(repr(np.ones(3, dtype=styp)),
"array(['1', '1', '1'], dtype='{}')".format(styp))
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
dtype='{}')""".format(styp)))
def test_linewidth_repr(self):
a = np.full(7, fill_value=2)
np.set_printoptions(linewidth=17)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2])""")
)
np.set_printoptions(linewidth=17, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2, 2])""")
)
a = np.full(8, fill_value=2)
np.set_printoptions(linewidth=18, legacy=False)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2, 2])""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2, 2,
2, 2, 2, 2])""")
)
def test_linewidth_str(self):
a = np.full(18, fill_value=2)
np.set_printoptions(linewidth=18)
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2
2 2]""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2]""")
)
def test_edgeitems(self):
np.set_printoptions(edgeitems=1, threshold=1)
a = np.arange(27).reshape((3, 3, 3))
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
b = np.zeros((3, 3, 1, 1))
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[0.]],
...,
[[0.]]],
...,
[[[0.]],
...,
[[0.]]]])""")
)
# 1.13 had extra trailing spaces, and was missing newlines
np.set_printoptions(legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[ 0.]],
...,
[[ 0.]]],
...,
[[[ 0.]],
...,
[[ 0.]]]])""")
)
def test_bad_args(self):
assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
assert_raises(TypeError, np.set_printoptions, threshold='1')
assert_raises(TypeError, np.set_printoptions, threshold=b'1')
assert_raises(TypeError, np.set_printoptions, precision='1')
assert_raises(TypeError, np.set_printoptions, precision=1.5)
def test_unicode_object_array():
expected = "array(['é'], dtype=object)"
x = np.array([u'\xe9'], dtype=object)
assert_equal(repr(x), expected)
class TestContextManager:
def test_ctx_mgr(self):
# test that context manager actually works
with np.printoptions(precision=2):
s = str(np.array([2.0]) / 3)
assert_equal(s, '[0.67]')
def test_ctx_mgr_restores(self):
        # test that print options are actually restored
opts = np.get_printoptions()
with np.printoptions(precision=opts['precision'] - 1,
linewidth=opts['linewidth'] - 4):
pass
assert_equal(np.get_printoptions(), opts)
def test_ctx_mgr_exceptions(self):
# test that print options are restored even if an exception is raised
opts = np.get_printoptions()
try:
with np.printoptions(precision=2, linewidth=11):
raise ValueError
except ValueError:
pass
assert_equal(np.get_printoptions(), opts)
def test_ctx_mgr_as_smth(self):
opts = {"precision": 2}
with np.printoptions(**opts) as ctx:
saved_opts = ctx.copy()
assert_equal({k: saved_opts[k] for k in opts}, opts)
| 37,161 | Python | 37.390496 | 91 | 0.468879 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_casting_unittests.py | """
The tests exercise the casting machinery in a more low-level manner.
The reason is mostly to test a new implementation of the casting machinery.
Unlike most tests in NumPy, these are closer to unit tests than
to integration tests.
"""
import pytest
import textwrap
import enum
import random
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.testing import assert_array_equal
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
# The simple dtypes skip object, parametric dtypes, and long double (unsupported by struct)
simple_dtypes = "?bhilqBHILQefdFD"
if np.dtype("l").itemsize != np.dtype("q").itemsize:
    # Remove l and L; the table was generated with 64-bit Linux in mind.
simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
def simple_dtype_instances():
for dtype_class in simple_dtypes:
dt = dtype_class()
yield pytest.param(dt, id=str(dt))
if dt.byteorder != "|":
dt = dt.newbyteorder()
yield pytest.param(dt, id=str(dt))
def get_expected_stringlength(dtype):
"""Returns the string length when casting the basic dtypes to strings.
"""
if dtype == np.bool_:
return 5
if dtype.kind in "iu":
if dtype.itemsize == 1:
length = 3
elif dtype.itemsize == 2:
length = 5
elif dtype.itemsize == 4:
length = 10
elif dtype.itemsize == 8:
length = 20
else:
raise AssertionError(f"did not find expected length for {dtype}")
if dtype.kind == "i":
length += 1 # adds one character for the sign
return length
    # Note: Can't do dtype comparison for longdouble on Windows
if dtype.char == "g":
return 48
elif dtype.char == "G":
return 48 * 2
elif dtype.kind == "f":
return 32 # also for half apparently.
elif dtype.kind == "c":
return 32 * 2
raise AssertionError(f"did not find expected length for {dtype}")
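# Aside (not part of the upstream file): the integer lengths above are the
# decimal digit counts of the maximum value of the unsigned type of each
# itemsize (e.g. two bytes -> 65535 -> five digits), plus one extra character
# for the sign of signed types.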
class Casting(enum.IntEnum):
no = 0
equiv = 1
safe = 2
same_kind = 3
unsafe = 4
def _get_cancast_table():
table = textwrap.dedent("""
X ? b h i l q B H I L Q e f d g F D G S U V O M m
? # = = = = = = = = = = = = = = = = = = = = = . =
b . # = = = = . . . . . = = = = = = = = = = = . =
h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
B . ~ = = = = # = = = = = = = = = = = = = = = . =
H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
e . . . . . . . . . . . # = = = = = = = = = = . .
f . . . . . . . . . . . ~ # = = = = = = = = = . .
d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
F . . . . . . . . . . . . . . . # = = = = = = . .
D . . . . . . . . . . . . . . . ~ # = = = = = . .
G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
S . . . . . . . . . . . . . . . . . . # = = = . .
U . . . . . . . . . . . . . . . . . . . # = = . .
V . . . . . . . . . . . . . . . . . . . . # = . .
O . . . . . . . . . . . . . . . . . . . . = # . .
M . . . . . . . . . . . . . . . . . . . . = = # .
m . . . . . . . . . . . . . . . . . . . . = = . #
""").strip().split("\n")
dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
"=": Casting.safe, "#": Casting.equiv,
" ": -1}
cancast = {}
for from_dt, row in zip(dtypes, table[1:]):
cancast[from_dt] = {}
for to_dt, c in zip(dtypes, row[2::2]):
cancast[from_dt][to_dt] = convert_cast[c]
return cancast
CAST_TABLE = _get_cancast_table()
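def _demo_cast_table_lookup():
    # Illustrative lookup, not part of the original suite: the table is keyed
    # by DType classes, so e.g. int8 -> float16 ("b" -> "e") resolves to the
    # "=" (safe) entry above.
    Int8, Float16 = type(np.dtype("b")), type(np.dtype("e"))
    assert CAST_TABLE[Int8][Float16] == Casting.safe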
class TestChanges:
"""
These test cases exercise some behaviour changes
"""
@pytest.mark.parametrize("string", ["S", "U"])
@pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
def test_float_to_string(self, floating, string):
assert np.can_cast(floating, string)
        # 100 is long enough to hold any formatted floating-point value
assert np.can_cast(floating, f"{string}100")
def test_to_void(self):
        # In general, we consider these casts safe:
assert np.can_cast("d", "V")
assert np.can_cast("S20", "V")
        # Do not consider it a safe cast if the void is too small:
assert not np.can_cast("d", "V1")
assert not np.can_cast("S20", "V1")
assert not np.can_cast("U1", "V1")
# Structured to unstructured is just like any other:
assert np.can_cast("d,i", "V", casting="same_kind")
# Unstructured void to unstructured is actually no cast at all:
assert np.can_cast("V3", "V", casting="no")
assert np.can_cast("V0", "V", casting="no")
class TestCasting:
size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
def get_data(self, dtype1, dtype2):
if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
length = self.size // dtype1.itemsize
else:
length = self.size // dtype2.itemsize
# Assume that the base array is well enough aligned for all inputs.
arr1 = np.empty(length, dtype=dtype1)
assert arr1.flags.c_contiguous
assert arr1.flags.aligned
values = [random.randrange(-128, 128) for _ in range(length)]
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr1[i] = value
if dtype2 is None:
if dtype1.char == "?":
values = [bool(v) for v in values]
return arr1, values
if dtype2.char == "?":
values = [bool(v) for v in values]
arr2 = np.empty(length, dtype=dtype2)
assert arr2.flags.c_contiguous
assert arr2.flags.aligned
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr2[i] = value
return arr1, arr2, values
def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
"""
Returns a copy of arr1 that may be non-contiguous or unaligned, and a
matching array for arr2 (although not a copy).
"""
if contig:
stride1 = arr1.dtype.itemsize
stride2 = arr2.dtype.itemsize
elif aligned:
stride1 = 2 * arr1.dtype.itemsize
stride2 = 2 * arr2.dtype.itemsize
else:
stride1 = arr1.dtype.itemsize + 1
stride2 = arr2.dtype.itemsize + 1
max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
from_bytes = np.zeros(max_size1, dtype=np.uint8)
to_bytes = np.zeros(max_size2, dtype=np.uint8)
# Sanity check that the above is large enough:
assert stride1 * len(arr1) <= from_bytes.nbytes
assert stride2 * len(arr2) <= to_bytes.nbytes
if aligned:
new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
arr2.shape, (stride2,))
else:
new1 = as_strided(from_bytes[1:].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[1:].view(arr2.dtype),
arr2.shape, (stride2,))
new1[...] = arr1
if not contig:
# Ensure we did not overwrite bytes that should not be written:
offset = arr1.dtype.itemsize if aligned else 0
buf = from_bytes[offset::stride1].tobytes()
assert buf.count(b"\0") == len(buf)
if contig:
assert new1.flags.c_contiguous
assert new2.flags.c_contiguous
else:
assert not new1.flags.c_contiguous
assert not new2.flags.c_contiguous
if aligned:
assert new1.flags.aligned
assert new2.flags.aligned
else:
assert not new1.flags.aligned or new1.dtype.alignment == 1
assert not new2.flags.aligned or new2.dtype.alignment == 1
return new1, new2
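    def _demo_unaligned_view(self):
        # Illustrative sketch, not part of the original suite: as_strided can
        # present an offset byte buffer as an unaligned array of a wider
        # type, which is exactly how the variations above are built.
        buf = np.zeros(4 * 8 + 1, dtype=np.uint8)
        unaligned = as_strided(buf[1:].view(np.float64), (4,), (8,))
        assert not unaligned.flags.aligned or unaligned.dtype.alignment == 1
        return unaligned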
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_simple_cancast(self, from_Dt):
for to_Dt in simple_dtypes:
cast = get_castingimpl(from_Dt, to_Dt)
for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
default = cast._resolve_descriptors((from_dt, None))[1][1]
assert default == to_Dt()
del default
for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
casting, (from_res, to_res), view_off = (
cast._resolve_descriptors((from_dt, to_dt)))
                    assert type(from_res) == from_Dt
                    assert type(to_res) == to_Dt
if view_off is not None:
# If a view is acceptable, this is "no" casting
# and byte order must be matching.
assert casting == Casting.no
# The above table lists this as "equivalent"
assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
# Note that to_res may not be the same as from_dt
assert from_res.isnative == to_res.isnative
else:
if from_Dt == to_Dt:
# Note that to_res may not be the same as from_dt
assert from_res.isnative != to_res.isnative
assert casting == CAST_TABLE[from_Dt][to_Dt]
if from_Dt is to_Dt:
                        assert from_dt is from_res
                        assert to_dt is to_res
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
@pytest.mark.parametrize("from_dt", simple_dtype_instances())
def test_simple_direct_casts(self, from_dt):
"""
This test checks numeric direct casts for dtypes supported also by the
        struct module (plus complex). It tries to test a wide range of
inputs, but skips over possibly undefined behaviour (e.g. int rollover).
Longdouble and CLongdouble are tested, but only using double precision.
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
for to_dt in simple_dtype_instances():
to_dt = to_dt.values[0]
cast = get_castingimpl(type(from_dt), type(to_dt))
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, to_dt))
if from_res is not from_dt or to_res is not to_dt:
# Do not test this case, it is handled in multiple steps,
                # each of which should be tested individually.
                continue
safe = casting <= Casting.safe
del from_res, to_res, casting
arr1, arr2, values = self.get_data(from_dt, to_dt)
cast._simple_strided_call((arr1, arr2))
# Check via python list
assert arr2.tolist() == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
# Check if alignment makes a difference, but only if supported
# and only if the alignment can be wrong
if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
not cast._supports_unaligned):
                continue
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
del arr1_o, arr2_o, cast
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_numeric_to_times(self, from_Dt):
# We currently only implement contiguous loops, so only need to
# test those.
from_dt = from_Dt()
time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
for time_dt in time_dtypes:
cast = get_castingimpl(type(from_dt), type(time_dt))
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, time_dt))
assert from_res is from_dt
assert to_res is time_dt
del from_res, to_res
assert casting & CAST_TABLE[from_Dt][type(time_dt)]
assert view_off is None
int64_dt = np.dtype(np.int64)
arr1, arr2, values = self.get_data(from_dt, int64_dt)
arr2 = arr2.view(time_dt)
arr2[...] = np.datetime64("NaT")
if time_dt == np.dtype("M8"):
# This is a bit of a strange path, and could probably be removed
arr1[-1] = 0 # ensure at least one value is not NaT
# The cast currently succeeds, but the values are invalid:
cast._simple_strided_call((arr1, arr2))
with pytest.raises(ValueError):
str(arr2[-1]) # e.g. conversion to string fails
                continue
cast._simple_strided_call((arr1, arr2))
assert [int(v) for v in arr2.tolist()] == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
@pytest.mark.parametrize(
["from_dt", "to_dt", "expected_casting", "expected_view_off",
"nom", "denom"],
[("M8[ns]", None, Casting.no, 0, 1, 1),
(str(np.dtype("M8[ns]").newbyteorder()), None,
Casting.equiv, None, 1, 1),
("M8", "M8[ms]", Casting.safe, 0, 1, 1),
# should be invalid cast:
("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, -1, 1314, -1315, 564442610]),
("m8[ns]", None, Casting.no, 0, 1, 1),
(str(np.dtype("m8[ns]").newbyteorder()), None,
Casting.equiv, None, 1, 1),
("m8", "m8[ms]", Casting.safe, 0, 1, 1),
# should be invalid cast:
("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, 0, 1314, -1315, 564442610])])
def test_time_to_time(self, from_dt, to_dt,
expected_casting, expected_view_off,
nom, denom):
from_dt = np.dtype(from_dt)
if to_dt is not None:
to_dt = np.dtype(to_dt)
# Test a few values for casting (results generated with NumPy 1.19)
values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
assert values.dtype.byteorder == from_dt.byteorder
assert np.isnat(values.view(from_dt)[0])
DType = type(from_dt)
cast = get_castingimpl(DType, DType)
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, to_dt))
assert from_res is from_dt
assert to_res is to_dt or to_dt is None
assert casting == expected_casting
assert view_off == expected_view_off
if nom is not None:
expected_out = (values * nom // denom).view(to_res)
expected_out[0] = "NaT"
else:
expected_out = np.empty_like(values)
expected_out[...] = denom
expected_out = expected_out.view(to_dt)
orig_arr = values.view(from_dt)
orig_out = np.empty_like(expected_out)
if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
# Casting from non-generic to generic units is an error and should
# probably be reported as an invalid cast earlier.
with pytest.raises(ValueError):
cast._simple_strided_call((orig_arr, orig_out))
return
        for aligned in [True, False]:
            for contig in [True, False]:
arr, out = self.get_data_variation(
orig_arr, orig_out, aligned, contig)
out[...] = 0
cast._simple_strided_call((arr, out))
assert_array_equal(out.view("int64"), expected_out.view("int64"))
def string_with_modified_length(self, dtype, change_length):
fact = 1 if dtype.char == "S" else 4
length = dtype.itemsize // fact + change_length
return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
@pytest.mark.parametrize("other_DT", simple_dtypes)
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_cancast(self, other_DT, string_char):
fact = 1 if string_char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(other_DT, string_DT)
other_dt = other_DT()
expected_length = get_expected_stringlength(other_dt)
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
(other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert safety == Casting.safe # we consider to string casts "safe"
assert view_off is None
assert isinstance(res_dt, string_DT)
        # These casts currently implement changing the string length, so
        # check the cast-safety for shorter/equal/longer string lengths:
for change_length in [-1, 0, 1]:
if change_length >= 0:
expected_safety = Casting.safe
else:
expected_safety = Casting.same_kind
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(other_dt, to_dt))
assert res_dt is to_dt
assert safety == expected_safety
assert view_off is None
# The opposite direction is always considered unsafe:
cast = get_castingimpl(string_DT, other_DT)
safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
assert safety == Casting.unsafe
assert view_off is None
cast = get_castingimpl(string_DT, other_DT)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(string_dt, None))
assert safety == Casting.unsafe
assert view_off is None
assert other_dt is res_dt # returns the singleton for simple dtypes
@pytest.mark.parametrize("string_char", ["S", "U"])
@pytest.mark.parametrize("other_dt", simple_dtype_instances())
def test_simple_string_casts_roundtrip(self, other_dt, string_char):
"""
Tests casts from and to string by checking the roundtripping property.
The test also covers some string to string casts (but not all).
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
cast_back = get_castingimpl(string_DT, type(other_dt))
_, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
(other_dt, None))
if res_other_dt is not other_dt:
# do not support non-native byteorder, skip test in that case
assert other_dt.byteorder != res_other_dt.byteorder
return
orig_arr, values = self.get_data(other_dt, None)
str_arr = np.zeros(len(orig_arr), dtype=string_dt)
string_dt_short = self.string_with_modified_length(string_dt, -1)
str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
string_dt_long = self.string_with_modified_length(string_dt, 1)
str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
assert not cast._supports_unaligned # if support is added, should test
assert not cast_back._supports_unaligned
for contig in [True, False]:
other_arr, str_arr = self.get_data_variation(
orig_arr, str_arr, True, contig)
_, str_arr_short = self.get_data_variation(
orig_arr, str_arr_short.copy(), True, contig)
_, str_arr_long = self.get_data_variation(
orig_arr, str_arr_long, True, contig)
cast._simple_strided_call((other_arr, str_arr))
cast._simple_strided_call((other_arr, str_arr_short))
assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
cast._simple_strided_call((other_arr, str_arr_long))
assert_array_equal(str_arr, str_arr_long)
if other_dt.kind == "b":
# Booleans do not roundtrip
continue
other_arr[...] = 0
cast_back._simple_strided_call((str_arr, other_arr))
assert_array_equal(orig_arr, other_arr)
other_arr[...] = 0
cast_back._simple_strided_call((str_arr_long, other_arr))
assert_array_equal(orig_arr, other_arr)
@pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_to_string_cancast(self, other_dt, string_char):
other_dt = np.dtype(other_dt)
fact = 1 if string_char == "S" else 4
div = 1 if other_dt.char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
expected_length = other_dt.itemsize // div
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
(other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert isinstance(res_dt, string_DT)
expected_view_off = None
if other_dt.char == string_char:
if other_dt.isnative:
expected_safety = Casting.no
expected_view_off = 0
else:
expected_safety = Casting.equiv
elif string_char == "U":
expected_safety = Casting.safe
else:
expected_safety = Casting.unsafe
assert view_off == expected_view_off
assert expected_safety == safety
for change_length in [-1, 0, 1]:
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(other_dt, to_dt))
assert res_dt is to_dt
if change_length <= 0:
assert view_off == expected_view_off
else:
assert view_off is None
if expected_safety == Casting.unsafe:
assert safety == expected_safety
elif change_length < 0:
assert safety == Casting.same_kind
elif change_length == 0:
assert safety == expected_safety
elif change_length > 0:
assert safety == Casting.safe
@pytest.mark.parametrize("order1", [">", "<"])
@pytest.mark.parametrize("order2", [">", "<"])
def test_unicode_byteswapped_cast(self, order1, order2):
        # Very specific tests (not using the castingimpl directly) that
        # test unicode byteswaps, including for unaligned array data.
dtype1 = np.dtype(f"{order1}U30")
dtype2 = np.dtype(f"{order2}U30")
data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
if dtype1.alignment != 1:
# alignment should always be >1, but skip the check if not
assert not data1.flags.aligned
assert not data2.flags.aligned
element = "this is a ünicode string‽"
data1[()] = element
# Test both `data1` and `data1.copy()` (which should be aligned)
for data in [data1, data1.copy()]:
data2[...] = data1
assert data2[()] == element
assert data2.copy()[()] == element
def test_void_to_string_special_case(self):
# Cover a small special case in void to string casting that could
# probably just as well be turned into an error (compare
# `test_object_to_parametric_internal_error` below).
assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
def test_object_to_parametric_internal_error(self):
# We reject casting from object to a parametric type, without
# figuring out the correct instance first.
object_dtype = type(np.dtype(object))
other_dtype = type(np.dtype(str))
cast = get_castingimpl(object_dtype, other_dtype)
with pytest.raises(TypeError,
match="casting from object to the parametric DType"):
cast._resolve_descriptors((np.dtype("O"), None))
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_object_and_simple_resolution(self, dtype):
# Simple test to exercise the cast when no instance is specified
object_dtype = type(np.dtype(object))
cast = get_castingimpl(object_dtype, type(dtype))
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(np.dtype("O"), dtype))
assert safety == Casting.unsafe
assert view_off is None
assert res_dt is dtype
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(np.dtype("O"), None))
assert safety == Casting.unsafe
assert view_off is None
assert res_dt == dtype.newbyteorder("=")
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_simple_to_object_resolution(self, dtype):
# Simple test to exercise the cast when no instance is specified
object_dtype = type(np.dtype(object))
cast = get_castingimpl(type(dtype), object_dtype)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(dtype, None))
assert safety == Casting.safe
assert view_off is None
assert res_dt is np.dtype("O")
@pytest.mark.parametrize("casting", ["no", "unsafe"])
def test_void_and_structured_with_subarray(self, casting):
# test case corresponding to gh-19325
dtype = np.dtype([("foo", "<f4", (3, 2))])
expected = casting == "unsafe"
assert np.can_cast("V4", dtype, casting=casting) == expected
assert np.can_cast(dtype, "V4", casting=casting) == expected
@pytest.mark.parametrize(["to_dt", "expected_off"],
[ # Same as `from_dt` but with both fields shifted:
(np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"],
"offsets": [0, 4]}), 2),
# Additional change of the names
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
"offsets": [0, 4]}), 2),
# Incompatible field offset change
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
"offsets": [0, 6]}), None)])
def test_structured_field_offsets(self, to_dt, expected_off):
# This checks the cast-safety and view offset for swapped and "shifted"
# fields which are viewable
from_dt = np.dtype({"names": ["a", "b"],
"formats": ["i4", "f4"],
"offsets": [2, 6]})
cast = get_castingimpl(type(from_dt), type(to_dt))
safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
if from_dt.names == to_dt.names:
assert safety == Casting.equiv
else:
assert safety == Casting.safe
# Shifting the original data pointer by -2 will align both by
# effectively adding 2 bytes of spacing before `from_dt`.
assert view_off == expected_off
@pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [
# Subarray cases:
("i", "(1,1)i", 0),
("(1,1)i", "i", 0),
("(2,1)i", "(2,1)i", 0),
# field cases (field to field is tested explicitly also):
        # Not considered viewable, because a negative offset would allow
        # many structured dtypes to indirectly access invalid memory.
("i", dict(names=["a"], formats=["i"], offsets=[2]), None),
(dict(names=["a"], formats=["i"], offsets=[2]), "i", 2),
# Currently considered not viewable, due to multiple fields
# even though they overlap (maybe we should not allow that?)
("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]),
None),
# different number of fields can't work, should probably just fail
# so it never reports "viewable":
("i,i", "i,i,i", None),
# Unstructured void cases:
("i4", "V3", 0), # void smaller or equal
("i4", "V4", 0), # void smaller or equal
("i4", "V10", None), # void is larger (no view)
("O", "V4", None), # currently reject objects for view here.
("O", "V8", None), # currently reject objects for view here.
("V4", "V3", 0),
("V4", "V4", 0),
("V3", "V4", None),
# Note that currently void-to-other cast goes via byte-strings
# and is not a "view" based cast like the opposite direction:
("V4", "i4", None),
])
    def test_structured_view_offsets_parametric(
self, from_dt, to_dt, expected_off):
# TODO: While this test is fairly thorough, right now, it does not
# really test some paths that may have nonzero offsets (they don't
        # really exist).
from_dt = np.dtype(from_dt)
to_dt = np.dtype(to_dt)
cast = get_castingimpl(type(from_dt), type(to_dt))
_, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
assert view_off == expected_off
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_object_casts_NULL_None_equivalence(self, dtype):
# None to <other> casts may succeed or fail, but a NULL'ed array must
# behave the same as one filled with None's.
arr_normal = np.array([None] * 5)
arr_NULLs = np.empty_like([None] * 5)
# If the check fails (maybe it should) the test would lose its purpose:
assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
try:
expected = arr_normal.astype(dtype)
except TypeError:
with pytest.raises(TypeError):
                arr_NULLs.astype(dtype)
else:
assert_array_equal(expected, arr_NULLs.astype(dtype))
@pytest.mark.parametrize("dtype",
np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
def test_nonstandard_bool_to_other(self, dtype):
# simple test for casting bool_ to numeric types, which should not
# expose the detail that NumPy bools can sometimes take values other
# than 0 and 1. See also gh-19514.
nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool)
res = nonstandard_bools.astype(dtype)
expected = [0, 1, 1]
assert_array_equal(res, expected)
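def _demo_castingimpl_resolution():
    # Illustrative sketch, not part of the original suite: resolving a cast
    # yields the safety level, the resolved operand descriptors, and the view
    # offset (None when the cast cannot be performed as a view).
    cast = get_castingimpl(type(np.dtype("i8")), type(np.dtype("f8")))
    safety, (from_dt, to_dt), view_off = cast._resolve_descriptors(
            (np.dtype("i8"), np.dtype("f8")))
    assert safety == Casting.safe
    assert view_off is None
    return from_dt, to_dt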
| 33,863 | Python | 40.704433 | 81 | 0.538877 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_arraymethod.py | """
This file tests the generic aspects of ArrayMethod. At the time of writing
this is private API, but when added, public API may be added here.
"""
from __future__ import annotations
import sys
import types
from typing import Any
import pytest
import numpy as np
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
class TestResolveDescriptors:
    # Test mainly the error paths of the resolve_descriptors function; note
    # that the `casting_unittests` tests exercise the non-error paths.
# Casting implementations are the main/only current user:
method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
@pytest.mark.parametrize("args", [
(True,), # Not a tuple.
        ((None,),), # Too few elements
((None, None, None),), # Too many
((None, None),), # Input dtype is None, which is invalid.
((np.dtype("d"), True),), # Output dtype is not a dtype
((np.dtype("f"), None),), # Input dtype does not match method
])
def test_invalid_arguments(self, args):
with pytest.raises(TypeError):
self.method._resolve_descriptors(*args)
class TestSimpleStridedCall:
    # Test mainly the error paths of the _simple_strided_call function; note
    # that the `casting_unittests` tests exercise the non-error paths.
# Casting implementations are the main/only current user:
method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
@pytest.mark.parametrize(["args", "error"], [
((True,), TypeError), # Not a tuple
(((None,),), TypeError), # Too few elements
((None, None), TypeError), # Inputs are not arrays.
(((None, None, None),), TypeError), # Too many
(((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes
(((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),),
TypeError), # Does not support byte-swapping
(((np.ones((2, 2), dtype="d"), np.ones((2, 2), dtype="f")),),
ValueError), # not 1-D
(((np.ones(3, dtype="d"), np.ones(4, dtype="f")),),
ValueError), # different length
        (((np.frombuffer(b"\x00" * 8 * 3, dtype="d"),
           np.frombuffer(b"\x00" * 4 * 3, dtype="f")),),
ValueError), # output not writeable
])
def test_invalid_arguments(self, args, error):
# This is private API, which may be modified freely
with pytest.raises(error):
self.method._simple_strided_call(*args)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
@pytest.mark.parametrize(
"cls", [np.ndarray, np.recarray, np.chararray, np.matrix, np.memmap]
)
class TestClassGetItem:
def test_class_getitem(self, cls: type[np.ndarray]) -> None:
"""Test `ndarray.__class_getitem__`."""
alias = cls[Any, Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is cls
@pytest.mark.parametrize("arg_len", range(4))
def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len in (1, 2):
assert cls[arg_tup]
else:
match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
with pytest.raises(TypeError, match=match):
cls[arg_tup]
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
def test_class_getitem_38() -> None:
match = "Type subscription requires python >= 3.9"
with pytest.raises(TypeError, match=match):
np.ndarray[Any, Any]
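def _demo_ndarray_alias():
    # Illustrative sketch, not part of the original suite: on Python >= 3.9
    # the subscripted class is a types.GenericAlias that keeps the original
    # class reachable through __origin__.
    if sys.version_info < (3, 9):
        return None
    alias = np.ndarray[Any, Any]
    assert isinstance(alias, types.GenericAlias)
    assert alias.__origin__ is np.ndarray
    return alias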
| 3,570 | Python | 36.989361 | 78 | 0.617087 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/_locales.py | """Provide class for testing in French locale
"""
import sys
import locale
import pytest
__ALL__ = ['CommaDecimalPointLocale']
def find_comma_decimal_point_locale():
"""See if platform has a decimal point as comma locale.
Find a locale that uses a comma instead of a period as the
decimal point.
Returns
-------
old_locale: str
Locale when the function was called.
    new_locale: {str, None}
        First comma-decimal locale found, None if none found.
"""
if sys.platform == 'win32':
locales = ['FRENCH']
else:
locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
old_locale = locale.getlocale(locale.LC_NUMERIC)
new_locale = None
try:
for loc in locales:
try:
locale.setlocale(locale.LC_NUMERIC, loc)
new_locale = loc
break
except locale.Error:
pass
finally:
locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
return old_locale, new_locale
class CommaDecimalPointLocale:
"""Sets LC_NUMERIC to a locale with comma as decimal point.
Classes derived from this class have setup and teardown methods that run
tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
the decimal point instead of periods ('.'). On exit the locale is restored
to the initial locale. It also serves as context manager with the same
effect. If no such locale is available, the test is skipped.
.. versionadded:: 1.15.0
"""
(cur_locale, tst_locale) = find_comma_decimal_point_locale()
def setup(self):
if self.tst_locale is None:
pytest.skip("No French locale available")
locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
def teardown(self):
locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
def __enter__(self):
if self.tst_locale is None:
pytest.skip("No French locale available")
locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
def __exit__(self, type, value, traceback):
locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
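def _demo_comma_locale():
    # Illustrative usage, not part of the original module: as a context
    # manager the class swaps LC_NUMERIC for the discovered comma-decimal
    # locale, so locale-aware formatting of 1.5 yields "1,5" (entering the
    # context skips the test when no such locale exists).
    with CommaDecimalPointLocale():
        return locale.str(1.5)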
| 2,192 | Python | 28.24 | 79 | 0.636861 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_simd_module.py | import pytest
from numpy.core._simd import targets
"""
This testing unit only for checking the sanity of common functionality,
therefore all we need is just to take one submodule that represents any
of enabled SIMD extensions to run the test on it and the second submodule
required to run only one check related to the possibility of mixing
the data types among each submodule.
"""
npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd]
npyv, npyv2 = (npyvs + [None, None])[:2]
unsigned_sfx = ["u8", "u16", "u32", "u64"]
signed_sfx = ["s8", "s16", "s32", "s64"]
fp_sfx = ["f32"]
if npyv and npyv.simd_f64:
fp_sfx.append("f64")
int_sfx = unsigned_sfx + signed_sfx
all_sfx = fp_sfx + int_sfx
@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support")
class Test_SIMD_MODULE:
@pytest.mark.parametrize('sfx', all_sfx)
def test_num_lanes(self, sfx):
nlanes = getattr(npyv, "nlanes_" + sfx)
vector = getattr(npyv, "setall_" + sfx)(1)
assert len(vector) == nlanes
@pytest.mark.parametrize('sfx', all_sfx)
def test_type_name(self, sfx):
vector = getattr(npyv, "setall_" + sfx)(1)
assert vector.__name__ == "npyv_" + sfx
def test_raises(self):
a, b = [npyv.setall_u32(1)]*2
for sfx in all_sfx:
vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}")
pytest.raises(TypeError, vcb("add"), a)
pytest.raises(TypeError, vcb("add"), a, b, a)
pytest.raises(TypeError, vcb("setall"))
pytest.raises(TypeError, vcb("setall"), [1])
pytest.raises(TypeError, vcb("load"), 1)
pytest.raises(ValueError, vcb("load"), [1])
pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a))
@pytest.mark.skipif(not npyv2, reason=(
"could not find a second SIMD extension with NPYV support"
))
def test_nomix(self):
# mix among submodules isn't allowed
a = npyv.setall_u32(1)
a2 = npyv2.setall_u32(1)
pytest.raises(TypeError, npyv.add_u32, a2, a2)
pytest.raises(TypeError, npyv2.add_u32, a, a)
@pytest.mark.parametrize('sfx', unsigned_sfx)
def test_unsigned_overflow(self, sfx):
nlanes = getattr(npyv, "nlanes_" + sfx)
maxu = (1 << int(sfx[1:])) - 1
maxu_72 = (1 << 72) - 1
lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0]
assert lane == maxu
lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes)
assert lanes == [maxu] * nlanes
lane = getattr(npyv, "setall_" + sfx)(-1)[0]
assert lane == maxu
lanes = getattr(npyv, "load_" + sfx)([-1] * nlanes)
assert lanes == [maxu] * nlanes
@pytest.mark.parametrize('sfx', signed_sfx)
def test_signed_overflow(self, sfx):
nlanes = getattr(npyv, "nlanes_" + sfx)
maxs_72 = (1 << 71) - 1
lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0]
assert lane == -1
lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes)
assert lanes == [-1] * nlanes
mins_72 = -1 << 71
lane = getattr(npyv, "setall_" + sfx)(mins_72)[0]
assert lane == 0
lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes)
assert lanes == [0] * nlanes
def test_truncate_f32(self):
f32 = npyv.setall_f32(0.1)[0]
assert f32 != 0.1
assert round(f32, 1) == 0.1
def test_compare(self):
data_range = range(0, npyv.nlanes_u32)
vdata = npyv.load_u32(data_range)
assert vdata == list(data_range)
assert vdata == tuple(data_range)
for i in data_range:
assert vdata[i] == data_range[i]
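def _demo_basic_intrinsics():
    # Illustrative sketch, not part of the original suite: each suffix maps
    # to setall_/load_/nlanes_ attributes on an enabled extension submodule.
    if not npyv:
        return None
    v = npyv.setall_u32(7)                # all lanes hold 7
    assert v == [7] * npyv.nlanes_u32     # vectors compare to lists
    return npyv.load_u32(range(npyv.nlanes_u32))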
| 3,758 | Python | 37.357142 | 100 | 0.589675 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_conversion_utils.py | """
Tests for numpy/core/src/multiarray/conversion_utils.c
"""
import re
import sys
import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
from numpy.testing import assert_warns, IS_PYPY
class StringConverterTestCase:
allow_bytes = True
case_insensitive = True
exact_match = False
warn = True
def _check_value_error(self, val):
pattern = r'\(got {}\)'.format(re.escape(repr(val)))
with pytest.raises(ValueError, match=pattern) as exc:
self.conv(val)
def _check_conv_assert_warn(self, val, expected):
if self.warn:
with assert_warns(DeprecationWarning) as exc:
assert self.conv(val) == expected
else:
assert self.conv(val) == expected
def _check(self, val, expected):
"""Takes valid non-deprecated inputs for converters,
runs converters on inputs, checks correctness of outputs,
warnings and errors"""
assert self.conv(val) == expected
if self.allow_bytes:
assert self.conv(val.encode('ascii')) == expected
else:
with pytest.raises(TypeError):
self.conv(val.encode('ascii'))
if len(val) != 1:
if self.exact_match:
self._check_value_error(val[:1])
self._check_value_error(val + '\0')
else:
self._check_conv_assert_warn(val[:1], expected)
if self.case_insensitive:
if val != val.lower():
self._check_conv_assert_warn(val.lower(), expected)
if val != val.upper():
self._check_conv_assert_warn(val.upper(), expected)
else:
if val != val.lower():
self._check_value_error(val.lower())
if val != val.upper():
self._check_value_error(val.upper())
def test_wrong_type(self):
# common cases which apply to all the below
with pytest.raises(TypeError):
self.conv({})
with pytest.raises(TypeError):
self.conv([])
def test_wrong_value(self):
# nonsense strings
self._check_value_error('')
self._check_value_error('\N{greek small letter pi}')
if self.allow_bytes:
self._check_value_error(b'')
# bytes which can't be converted to strings via utf8
self._check_value_error(b"\xFF")
if self.exact_match:
self._check_value_error("there's no way this is supported")
class TestByteorderConverter(StringConverterTestCase):
""" Tests of PyArray_ByteorderConverter """
conv = mt.run_byteorder_converter
warn = False
def test_valid(self):
for s in ['big', '>']:
self._check(s, 'NPY_BIG')
for s in ['little', '<']:
self._check(s, 'NPY_LITTLE')
for s in ['native', '=']:
self._check(s, 'NPY_NATIVE')
for s in ['ignore', '|']:
self._check(s, 'NPY_IGNORE')
for s in ['swap']:
self._check(s, 'NPY_SWAP')
class TestSortkindConverter(StringConverterTestCase):
""" Tests of PyArray_SortkindConverter """
conv = mt.run_sortkind_converter
warn = False
def test_valid(self):
self._check('quicksort', 'NPY_QUICKSORT')
self._check('heapsort', 'NPY_HEAPSORT')
self._check('mergesort', 'NPY_STABLESORT') # alias
self._check('stable', 'NPY_STABLESORT')
class TestSelectkindConverter(StringConverterTestCase):
""" Tests of PyArray_SelectkindConverter """
conv = mt.run_selectkind_converter
case_insensitive = False
exact_match = True
def test_valid(self):
self._check('introselect', 'NPY_INTROSELECT')
class TestSearchsideConverter(StringConverterTestCase):
""" Tests of PyArray_SearchsideConverter """
conv = mt.run_searchside_converter
def test_valid(self):
self._check('left', 'NPY_SEARCHLEFT')
self._check('right', 'NPY_SEARCHRIGHT')
class TestOrderConverter(StringConverterTestCase):
""" Tests of PyArray_OrderConverter """
conv = mt.run_order_converter
warn = False
def test_valid(self):
self._check('c', 'NPY_CORDER')
self._check('f', 'NPY_FORTRANORDER')
self._check('a', 'NPY_ANYORDER')
self._check('k', 'NPY_KEEPORDER')
def test_flatten_invalid_order(self):
# invalid after gh-14596
with pytest.raises(ValueError):
self.conv('Z')
for order in [False, True, 0, 8]:
with pytest.raises(TypeError):
self.conv(order)
class TestClipmodeConverter(StringConverterTestCase):
""" Tests of PyArray_ClipmodeConverter """
conv = mt.run_clipmode_converter
def test_valid(self):
self._check('clip', 'NPY_CLIP')
self._check('wrap', 'NPY_WRAP')
self._check('raise', 'NPY_RAISE')
# integer values allowed here
assert self.conv(np.CLIP) == 'NPY_CLIP'
assert self.conv(np.WRAP) == 'NPY_WRAP'
assert self.conv(np.RAISE) == 'NPY_RAISE'
class TestCastingConverter(StringConverterTestCase):
""" Tests of PyArray_CastingConverter """
conv = mt.run_casting_converter
case_insensitive = False
exact_match = True
def test_valid(self):
self._check("no", "NPY_NO_CASTING")
self._check("equiv", "NPY_EQUIV_CASTING")
self._check("safe", "NPY_SAFE_CASTING")
self._check("same_kind", "NPY_SAME_KIND_CASTING")
self._check("unsafe", "NPY_UNSAFE_CASTING")
class TestIntpConverter:
""" Tests of PyArray_IntpConverter """
conv = mt.run_intp_converter
def test_basic(self):
assert self.conv(1) == (1,)
assert self.conv((1, 2)) == (1, 2)
assert self.conv([1, 2]) == (1, 2)
assert self.conv(()) == ()
def test_none(self):
# once the warning expires, this will raise TypeError
with pytest.warns(DeprecationWarning):
assert self.conv(None) == ()
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
reason="PyPy bug in error formatting")
def test_float(self):
with pytest.raises(TypeError):
self.conv(1.0)
with pytest.raises(TypeError):
self.conv([1, 1.0])
def test_too_large(self):
with pytest.raises(ValueError):
self.conv(2**64)
def test_too_many_dims(self):
assert self.conv([1]*32) == (1,)*32
with pytest.raises(ValueError):
self.conv([1]*33)
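def _demo_order_converter():
    # Illustrative sketch, not part of the original suite: the converters map
    # user-facing strings onto the names of the C-level NPY_* enum values.
    assert mt.run_order_converter("c") == "NPY_CORDER"
    assert mt.run_casting_converter("same_kind") == "NPY_SAME_KIND_CASTING"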
| 6,559 | Python | 30.38756 | 76 | 0.589267 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_getlimits.py | """ Test functions for limits module.
"""
import warnings
import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
from numpy.testing import assert_equal, assert_, assert_raises
from numpy.core.getlimits import _discovered_machar, _float_ma
##################################################
class TestPythonFloat:
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype), id(ftype2))
class TestHalf:
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype), id(ftype2))
class TestSingle:
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype), id(ftype2))
class TestDouble:
def test_singleton(self):
ftype = finfo(double)
ftype2 = finfo(double)
assert_equal(id(ftype), id(ftype2))
class TestLongdouble:
def test_singleton(self):
ftype = finfo(longdouble)
ftype2 = finfo(longdouble)
assert_equal(id(ftype), id(ftype2))
class TestFinfo:
def test_basic(self):
dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
[np.float16, np.float32, np.float64, np.complex64,
np.complex128]))
for dt1, dt2 in dts:
for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep',
'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp',
'nmant', 'precision', 'resolution', 'tiny',
'smallest_normal', 'smallest_subnormal'):
assert_equal(getattr(finfo(dt1), attr),
getattr(finfo(dt2), attr), attr)
assert_raises(ValueError, finfo, 'i4')
class TestIinfo:
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
[np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]))
for dt1, dt2 in dts:
for attr in ('bits', 'min', 'max'):
assert_equal(getattr(iinfo(dt1), attr),
getattr(iinfo(dt2), attr), attr)
assert_raises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
types = np.sctypes['uint']
for T in types:
assert_equal(iinfo(T).max, T(-1))
class TestRepr:
def test_iinfo_repr(self):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
def test_finfo_repr(self):
expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \
" max=3.4028235e+38, dtype=float32)"
assert_equal(repr(np.finfo(np.float32)), expected)
def test_instances():
iinfo(10)
finfo(3.0)
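def _demo_finfo_fields():
    # Illustrative sketch, not part of the original suite: finfo exposes the
    # IEEE parameters checked throughout this file, e.g. float64 has 52
    # mantissa bits and eps == 2**-52.
    info = np.finfo(np.float64)
    assert info.nmant == 52
    assert info.eps == 2.0 ** -info.nmant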
def assert_ma_equal(discovered, ma_like):
# Check MachAr-like objects same as calculated MachAr instances
for key, value in discovered.__dict__.items():
assert_equal(value, getattr(ma_like, key))
if hasattr(value, 'shape'):
assert_equal(value.shape, getattr(ma_like, key).shape)
assert_equal(value.dtype, getattr(ma_like, key).dtype)
def test_known_types():
# Test we are correctly compiling parameters for known types
for ftype, ma_like in ((np.float16, _float_ma[16]),
(np.float32, _float_ma[32]),
(np.float64, _float_ma[64])):
assert_ma_equal(_discovered_machar(ftype), ma_like)
# Suppress warning for broken discovery of double double on PPC
with np.errstate(all='ignore'):
ld_ma = _discovered_machar(np.longdouble)
bytes = np.dtype(np.longdouble).itemsize
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
# 80-bit extended precision
assert_ma_equal(ld_ma, _float_ma[80])
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
        # IEEE 754 128-bit
assert_ma_equal(ld_ma, _float_ma[128])
def test_subnormal_warning():
"""Test that the subnormal is zero warning is not being raised."""
with np.errstate(all='ignore'):
ld_ma = _discovered_machar(np.longdouble)
bytes = np.dtype(np.longdouble).itemsize
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
# 80-bit extended precision
ld_ma.smallest_subnormal
assert len(w) == 0
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
            # IEEE 754 128-bit
ld_ma.smallest_subnormal
assert len(w) == 0
else:
# Double double
ld_ma.smallest_subnormal
# This test may fail on some platforms
assert len(w) == 0
def test_plausible_finfo():
# Assert that finfo returns reasonable results for all types
for ftype in np.sctypes['float'] + np.sctypes['complex']:
info = np.finfo(ftype)
assert_(info.nmant > 1)
assert_(info.minexp < -1)
assert_(info.maxexp > 1)
| 5,207 | Python | 34.671233 | 75 | 0.571346 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_cpu_dispatcher.py | from numpy.core._multiarray_umath import __cpu_features__, __cpu_baseline__, __cpu_dispatch__
from numpy.core import _umath_tests
from numpy.testing import assert_equal
def test_dispatcher():
"""
Testing the utilities of the CPU dispatcher
"""
targets = (
"SSE2", "SSE41", "AVX2",
"VSX", "VSX2", "VSX3",
"NEON", "ASIMD", "ASIMDHP"
)
highest_sfx = "" # no suffix for the baseline
all_sfx = []
for feature in reversed(targets):
        # skip baseline features; by default `CCompilerOpt` does not generate
        # separate objects for the baseline, just one object that combines all
        # of them via the 'baseline' option within the configuration statements.
if feature in __cpu_baseline__:
continue
# check compiler and running machine support
if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
continue
if not highest_sfx:
highest_sfx = "_" + feature
all_sfx.append("func" + "_" + feature)
test = _umath_tests.test_dispatch()
assert_equal(test["func"], "func" + highest_sfx)
assert_equal(test["var"], "var" + highest_sfx)
if highest_sfx:
assert_equal(test["func_xb"], "func" + highest_sfx)
assert_equal(test["var_xb"], "var" + highest_sfx)
else:
assert_equal(test["func_xb"], "nobase")
assert_equal(test["var_xb"], "nobase")
all_sfx.append("func") # add the baseline
assert_equal(test["all"], all_sfx)
| 1,521 | Python | 34.395348 | 97 | 0.604865 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_unicode.py | import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_equal
def buffer_length(arr):
if isinstance(arr, str):
if not arr:
charmax = 0
else:
charmax = max([ord(c) for c in arr])
if charmax < 256:
size = 1
elif charmax < 65536:
size = 2
else:
size = 4
return size * len(arr)
v = memoryview(arr)
if v.shape is None:
return len(v) * v.itemsize
else:
return np.prod(v.shape) * v.itemsize
# In both cases below we need to make sure that the byte swapped value (as
# UCS4) is still a valid unicode:
# Value that can be represented in UCS2 interpreters
ucs2_value = u'\u0900'
# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
ucs4_value = u'\U00100900'
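def _demo_buffer_length():
    # Illustrative values, not part of the original suite: for Python strings
    # the helper scales the length by the width of the widest code point.
    assert buffer_length("abc") == 3          # latin-1 range: 1 byte per char
    assert buffer_length(ucs2_value) == 2     # BMP code point: 2 bytes
    assert buffer_length(ucs4_value) == 4     # astral code point: 4 bytes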
def test_string_cast():
str_arr = np.array(["1234", "1234\0\0"], dtype='S')
uni_arr1 = str_arr.astype('>U')
uni_arr2 = str_arr.astype('<U')
assert_(str_arr != uni_arr1)
assert_(str_arr != uni_arr2)
assert_array_equal(uni_arr1, uni_arr2)
############################################################
# Creation tests
############################################################
class CreateZeros:
"""Check the creation of zero-valued arrays"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
assert_(ua_scalar == u'')
# Encode to ascii and double check
assert_(ua_scalar.encode('ascii') == b'')
# Check buffer lengths for scalars
assert_(buffer_length(ua_scalar) == 0)
def test_zeros0D(self):
# Check creation of 0-dimensional objects
ua = np.zeros((), dtype='U%s' % self.ulen)
self.content_check(ua, ua[()], 4*self.ulen)
def test_zerosSD(self):
# Check creation of single-dimensional objects
ua = np.zeros((2,), dtype='U%s' % self.ulen)
self.content_check(ua, ua[0], 4*self.ulen*2)
self.content_check(ua, ua[1], 4*self.ulen*2)
def test_zerosMD(self):
# Check creation of multi-dimensional objects
ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen)
self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
class TestCreateZeros_1(CreateZeros):
"""Check the creation of zero-valued arrays (size 1)"""
ulen = 1
class TestCreateZeros_2(CreateZeros):
"""Check the creation of zero-valued arrays (size 2)"""
ulen = 2
class TestCreateZeros_1009(CreateZeros):
"""Check the creation of zero-valued arrays (size 1009)"""
ulen = 1009
class CreateValues:
"""Check the creation of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if self.ucs_value == ucs4_value:
            # In UCS2, the astral value (e.g. \U00100900) is represented
            # using a surrogate *pair*
assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
            # In UCS2, the BMP value (e.g. \u0900) is represented using a
            # regular 2-byte word
assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check creation of 0-dimensional objects with values
ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
self.content_check(ua, ua[()], 4*self.ulen)
def test_valuesSD(self):
# Check creation of single-dimensional objects with values
ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
self.content_check(ua, ua[0], 4*self.ulen*2)
self.content_check(ua, ua[1], 4*self.ulen*2)
def test_valuesMD(self):
# Check creation of multi-dimensional objects with values
ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen)
self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
class TestCreateValues_1_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class TestCreateValues_1_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class TestCreateValues_2_UCS2(CreateValues):
"""Check the creation of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class TestCreateValues_2_UCS4(CreateValues):
"""Check the creation of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class TestCreateValues_1009_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class TestCreateValues_1009_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
############################################################
# Assignment tests
############################################################
class AssignValues:
"""Check the assignment of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if self.ucs_value == ucs4_value:
            # In UCS2, the astral value (e.g. \U00100900) is represented
            # using a surrogate *pair*
assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
            # In UCS2, the BMP value (e.g. \u0900) is represented using a
            # regular 2-byte word
assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check assignment of 0-dimensional objects with values
ua = np.zeros((), dtype='U%s' % self.ulen)
ua[()] = self.ucs_value*self.ulen
self.content_check(ua, ua[()], 4*self.ulen)
def test_valuesSD(self):
# Check assignment of single-dimensional objects with values
ua = np.zeros((2,), dtype='U%s' % self.ulen)
ua[0] = self.ucs_value*self.ulen
self.content_check(ua, ua[0], 4*self.ulen*2)
ua[1] = self.ucs_value*self.ulen
self.content_check(ua, ua[1], 4*self.ulen*2)
def test_valuesMD(self):
# Check assignment of multi-dimensional objects with values
ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen)
ua[0, 0, 0] = self.ucs_value*self.ulen
self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
ua[-1, -1, -1] = self.ucs_value*self.ulen
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
class TestAssignValues_1_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class TestAssignValues_1_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class TestAssignValues_2_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class TestAssignValues_2_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class TestAssignValues_1009_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class TestAssignValues_1009_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
############################################################
# Byteorder tests
############################################################
class ByteorderValues:
"""Check the byteorder of unicode arrays in round-trip conversions"""
def test_values0D(self):
# Check byteorder of 0-dimensional objects
ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
# This changes the interpretation of the data region (but not the
# actual data), therefore the returned scalars are not
# the same (they are byte-swapped versions of each other).
assert_(ua[()] != ua2[()])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_valuesSD(self):
# Check byteorder of single-dimensional objects
ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
assert_((ua != ua2).all())
assert_(ua[-1] != ua2[-1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_valuesMD(self):
# Check byteorder of multi-dimensional objects
ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4,
dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
assert_((ua != ua2).all())
assert_(ua[-1, -1, -1] != ua2[-1, -1, -1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_values_cast(self):
        # Check byteorder when casting the array, for a strided and a
        # contiguous array:
test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
test2 = np.repeat(test1, 2)[::2]
for ua in (test1, test2):
ua2 = ua.astype(dtype=ua.dtype.newbyteorder())
assert_((ua == ua2).all())
assert_(ua[-1] == ua2[-1])
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_values_updowncast(self):
        # Check byteorder when casting the array to a longer and a shorter
        # string length, for strided and contiguous arrays
test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
test2 = np.repeat(test1, 2)[::2]
for ua in (test1, test2):
# Cast to a longer type with zero padding
longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder()
ua2 = ua.astype(dtype=longer_type)
assert_((ua == ua2).all())
assert_(ua[-1] == ua2[-1])
# Cast back again with truncating:
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
class TestByteorder_1_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class TestByteorder_1_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class TestByteorder_2_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class TestByteorder_2_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class TestByteorder_1009_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class TestByteorder_1009_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
| 12,553 | Python | 33.584022 | 84 | 0.594201 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_custom_dtypes.py | import pytest
import numpy as np
from numpy.testing import assert_array_equal
from numpy.core._multiarray_umath import (
_discover_array_parameters as discover_array_params, _get_sfloat_dtype)
SF = _get_sfloat_dtype()
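def _demo_scaled_float():
    # Illustrative sketch, not part of the original suite: the test DType
    # stores values pre-divided by its scaling factor, so multiplying the raw
    # float64 view by the scaling recovers the logical values.
    a = np.array([1.0, 2.0], dtype=SF(2.0))
    assert a.dtype.get_scaling() == 2.0
    return 2.0 * a.view(np.float64)  # -> array([1., 2.])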
class TestSFloat:
def _get_array(self, scaling, aligned=True):
if not aligned:
a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
a = a.view(np.float64)
a[:] = [1., 2., 3.]
else:
a = np.array([1., 2., 3.])
a *= 1./scaling # the casting code also uses the reciprocal.
return a.view(SF(scaling))
def test_sfloat_rescaled(self):
sf = SF(1.)
sf2 = sf.scaled_by(2.)
assert sf2.get_scaling() == 2.
sf6 = sf2.scaled_by(3.)
assert sf6.get_scaling() == 6.
def test_class_discovery(self):
# This does not test much, since we always discover the scaling as 1.
# But most of NumPy (when writing) does not understand DType classes
dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
assert dt == SF(1.)
@pytest.mark.parametrize("scaling", [1., -1., 2.])
def test_scaled_float_from_floats(self, scaling):
a = np.array([1., 2., 3.], dtype=SF(scaling))
assert a.dtype.get_scaling() == scaling
assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
def test_repr(self):
# Check the repr, mainly to cover the code paths:
assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"
@pytest.mark.parametrize("scaling", [1., -1., 2.])
def test_sfloat_from_float(self, scaling):
a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))
assert a.dtype.get_scaling() == scaling
assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
@pytest.mark.parametrize("aligned", [True, False])
@pytest.mark.parametrize("scaling", [1., -1., 2.])
def test_sfloat_getitem(self, aligned, scaling):
a = self._get_array(1., aligned)
assert a.tolist() == [1., 2., 3.]
@pytest.mark.parametrize("aligned", [True, False])
def test_sfloat_casts(self, aligned):
a = self._get_array(1., aligned)
assert np.can_cast(a, SF(-1.), casting="equiv")
assert not np.can_cast(a, SF(-1.), casting="no")
na = a.astype(SF(-1.))
assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))
assert np.can_cast(a, SF(2.), casting="same_kind")
assert not np.can_cast(a, SF(2.), casting="safe")
a2 = a.astype(SF(2.))
assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))
@pytest.mark.parametrize("aligned", [True, False])
def test_sfloat_cast_internal_errors(self, aligned):
a = self._get_array(2e300, aligned)
with pytest.raises(TypeError,
match="error raised inside the core-loop: non-finite factor!"):
a.astype(SF(2e-300))
def test_sfloat_promotion(self):
assert np.result_type(SF(2.), SF(3.)) == SF(3.)
assert np.result_type(SF(3.), SF(2.)) == SF(3.)
        # Float64 -> SF(1.) and then promotes normally, so both of these work:
assert np.result_type(SF(3.), np.float64) == SF(3.)
assert np.result_type(np.float64, SF(0.5)) == SF(1.)
# Test an undefined promotion:
with pytest.raises(TypeError):
np.result_type(SF(1.), np.int64)
def test_basic_multiply(self):
a = self._get_array(2.)
b = self._get_array(4.)
res = a * b
# multiplies dtype scaling and content separately:
assert res.dtype.get_scaling() == 8.
expected_view = a.view(np.float64) * b.view(np.float64)
assert_array_equal(res.view(np.float64), expected_view)
def test_possible_and_impossible_reduce(self):
# For reductions to work, the first and last operand must have the
# same dtype. For this parametric DType that is not necessarily true.
a = self._get_array(2.)
        # Addition reduction works (as of writing it requires passing
        # `initial` because setting a scaled-float from the default `0` fails).
res = np.add.reduce(a, initial=0.)
assert res == a.astype(np.float64).sum()
# But each multiplication changes the factor, so a reduction is not
# possible (the relaxed version of the old refusal to handle any
# flexible dtype).
with pytest.raises(TypeError,
match="the resolved dtypes are not compatible"):
np.multiply.reduce(a)
def test_basic_ufunc_at(self):
float_a = np.array([1., 2., 3.])
b = self._get_array(2.)
float_b = b.view(np.float64).copy()
np.multiply.at(float_b, [1, 1, 1], float_a)
np.multiply.at(b, [1, 1, 1], float_a)
assert_array_equal(b.view(np.float64), float_b)
def test_basic_multiply_promotion(self):
float_a = np.array([1., 2., 3.])
b = self._get_array(2.)
res1 = float_a * b
res2 = b * float_a
# one factor is one, so we get the factor of b:
assert res1.dtype == res2.dtype == b.dtype
expected_view = float_a * b.view(np.float64)
assert_array_equal(res1.view(np.float64), expected_view)
assert_array_equal(res2.view(np.float64), expected_view)
# Check that promotion works when `out` is used:
np.multiply(b, float_a, out=res2)
with pytest.raises(TypeError):
# The promoter accepts this (maybe it should not), but the SFloat
# result cannot be cast to integer:
np.multiply(b, float_a, out=np.arange(3))
def test_basic_addition(self):
a = self._get_array(2.)
b = self._get_array(4.)
res = a + b
# addition uses the type promotion rules for the result:
assert res.dtype == np.result_type(a.dtype, b.dtype)
expected_view = (a.astype(res.dtype).view(np.float64) +
b.astype(res.dtype).view(np.float64))
assert_array_equal(res.view(np.float64), expected_view)
def test_addition_cast_safety(self):
"""The addition method is special for the scaled float, because it
includes the "cast" between different factors, thus cast-safety
is influenced by the implementation.
"""
a = self._get_array(2.)
b = self._get_array(-2.)
c = self._get_array(3.)
# sign change is "equiv":
np.add(a, b, casting="equiv")
with pytest.raises(TypeError):
np.add(a, b, casting="no")
# Different factor is "same_kind" (default) so check that "safe" fails
with pytest.raises(TypeError):
np.add(a, c, casting="safe")
# Check that casting the output fails also (done by the ufunc here)
with pytest.raises(TypeError):
np.add(a, a, out=c, casting="safe")
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_casts_to_bool(self, ufunc):
a = self._get_array(2.)
a[0] = 0. # make sure first element is considered False.
float_equiv = a.astype(float)
expected = ufunc(float_equiv, float_equiv)
res = ufunc(a, a)
assert_array_equal(res, expected)
# also check that the same works for reductions:
expected = ufunc.reduce(float_equiv)
res = ufunc.reduce(a)
assert_array_equal(res, expected)
# The output casting does not match the bool, bool -> bool loop:
with pytest.raises(TypeError):
ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_extint128.py
import itertools
import contextlib
import operator
import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
from numpy.testing import assert_raises, assert_equal
INT64_MAX = np.iinfo(np.int64).max
INT64_MIN = np.iinfo(np.int64).min
INT64_MID = 2**32
# int128 is not two's complement; the sign bit is stored separately
INT128_MAX = 2**128 - 1
INT128_MIN = -INT128_MAX
INT128_MID = 2**64
INT64_VALUES = (
[INT64_MIN + j for j in range(20)] +
[INT64_MAX - j for j in range(20)] +
[INT64_MID + j for j in range(-20, 20)] +
[2*INT64_MID + j for j in range(-20, 20)] +
[INT64_MID//2 + j for j in range(-20, 20)] +
list(range(-70, 70))
)
INT128_VALUES = (
[INT128_MIN + j for j in range(20)] +
[INT128_MAX - j for j in range(20)] +
[INT128_MID + j for j in range(-20, 20)] +
[2*INT128_MID + j for j in range(-20, 20)] +
[INT128_MID//2 + j for j in range(-20, 20)] +
list(range(-70, 70)) +
[False] # negative zero
)
INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
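# The value sets above deliberately cluster around the overflow boundaries
# (MIN/MAX), the half-word boundaries (MID, 2*MID, MID//2) and zero, since
# those are where carry/borrow handling in the 128-bit routines is most
# likely to go wrong.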
@contextlib.contextmanager
def exc_iter(*args):
"""
    Iterate over the Cartesian product of *args, and if an exception is
    raised, add information about the current iterate to the failure message.
"""
value = [None]
def iterate():
for v in itertools.product(*args):
value[0] = v
yield v
try:
yield iterate()
except Exception:
import traceback
msg = "At: %r\n%s" % (repr(value[0]),
traceback.format_exc())
raise AssertionError(msg)
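# Minimal usage sketch for exc_iter: any exception raised inside the loop is
# re-raised as an AssertionError whose message also reports the offending
# tuple, which keeps the exhaustive sweeps below debuggable. For example:
#
#     with exc_iter([1, 2], [3, 4]) as it:
#         for a, b in it:
#             assert a + b < 6  # failure message includes "At: (2, 4)"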
def test_safe_binop():
# Test checked arithmetic routines
ops = [
(operator.add, 1),
(operator.sub, 2),
(operator.mul, 3)
]
with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
for xop, a, b in it:
pyop, op = xop
c = pyop(a, b)
if not (INT64_MIN <= c <= INT64_MAX):
assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
else:
d = mt.extint_safe_binop(a, b, op)
if c != d:
# assert_equal is slow
assert_equal(d, c)
def test_to_128():
with exc_iter(INT64_VALUES) as it:
for a, in it:
b = mt.extint_to_128(a)
if a != b:
assert_equal(b, a)
def test_to_64():
with exc_iter(INT128_VALUES) as it:
for a, in it:
if not (INT64_MIN <= a <= INT64_MAX):
assert_raises(OverflowError, mt.extint_to_64, a)
else:
b = mt.extint_to_64(a)
if a != b:
assert_equal(b, a)
def test_mul_64_64():
with exc_iter(INT64_VALUES, INT64_VALUES) as it:
for a, b in it:
c = a * b
d = mt.extint_mul_64_64(a, b)
if c != d:
assert_equal(d, c)
def test_add_128():
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
for a, b in it:
c = a + b
if not (INT128_MIN <= c <= INT128_MAX):
assert_raises(OverflowError, mt.extint_add_128, a, b)
else:
d = mt.extint_add_128(a, b)
if c != d:
assert_equal(d, c)
def test_sub_128():
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
for a, b in it:
c = a - b
if not (INT128_MIN <= c <= INT128_MAX):
assert_raises(OverflowError, mt.extint_sub_128, a, b)
else:
d = mt.extint_sub_128(a, b)
if c != d:
assert_equal(d, c)
def test_neg_128():
with exc_iter(INT128_VALUES) as it:
for a, in it:
b = -a
c = mt.extint_neg_128(a)
if b != c:
assert_equal(c, b)
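# The shift tests below emulate sign-magnitude shifts in pure Python: the
# magnitude is shifted (and masked to 128 bits for the left shift) before
# the sign is re-applied, matching the separate-sign-bit representation
# noted above.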
def test_shl_128():
with exc_iter(INT128_VALUES) as it:
for a, in it:
if a < 0:
b = -(((-a) << 1) & (2**128-1))
else:
b = (a << 1) & (2**128-1)
c = mt.extint_shl_128(a)
if b != c:
assert_equal(c, b)
def test_shr_128():
with exc_iter(INT128_VALUES) as it:
for a, in it:
if a < 0:
b = -((-a) >> 1)
else:
b = a >> 1
c = mt.extint_shr_128(a)
if b != c:
assert_equal(c, b)
def test_gt_128():
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
for a, b in it:
c = a > b
d = mt.extint_gt_128(a, b)
if c != d:
assert_equal(d, c)
@pytest.mark.slow
def test_divmod_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
if a >= 0:
c, cr = divmod(a, b)
else:
c, cr = divmod(-a, b)
c = -c
cr = -cr
d, dr = mt.extint_divmod_128_64(a, b)
            if c != d or cr != dr or b*d + dr != a:
assert_equal(d, c)
assert_equal(dr, cr)
assert_equal(b*d + dr, a)
def test_floordiv_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
c = a // b
d = mt.extint_floordiv_128_64(a, b)
if c != d:
assert_equal(d, c)
def test_ceildiv_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
c = (a + b - 1) // b
d = mt.extint_ceildiv_128_64(a, b)
if c != d:
assert_equal(d, c)
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_overrides.py
import inspect
import sys
import os
import tempfile
from io import StringIO
from unittest import mock
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex)
from numpy.core.overrides import (
_get_implementing_args, array_function_dispatch,
verify_matching_signatures, ARRAY_FUNCTION_ENABLED)
from numpy.compat import pickle
import pytest
requires_array_function = pytest.mark.skipif(
not ARRAY_FUNCTION_ENABLED,
reason="__array_function__ dispatch not enabled.")
def _return_not_implemented(self, *args, **kwargs):
return NotImplemented
# need to define this at the top level to test pickling
@array_function_dispatch(lambda array: (array,))
def dispatched_one_arg(array):
"""Docstring."""
return 'original'
@array_function_dispatch(lambda array1, array2: (array1, array2))
def dispatched_two_arg(array1, array2):
"""Docstring."""
return 'original'
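# The dispatcher (the lambda) only extracts the array-like arguments; the
# decorated function body is the default implementation. A hedged sketch of
# the resulting protocol, assuming __array_function__ dispatch is enabled
# (MyDuck is a hypothetical stand-in for the duck arrays defined below):
#
#     class MyDuck:
#         def __array_function__(self, func, types, args, kwargs):
#             return 'overridden'
#
#     dispatched_one_arg(np.array(1))  # -> 'original'
#     dispatched_one_arg(MyDuck())     # -> 'overridden'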
class TestGetImplementingArgs:
def test_ndarray(self):
array = np.array(1)
args = _get_implementing_args([array])
assert_equal(list(args), [array])
args = _get_implementing_args([array, array])
assert_equal(list(args), [array])
args = _get_implementing_args([array, 1])
assert_equal(list(args), [array])
args = _get_implementing_args([1, array])
assert_equal(list(args), [array])
def test_ndarray_subclasses(self):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
pass
array = np.array(1).view(np.ndarray)
override_sub = np.array(1).view(OverrideSub)
no_override_sub = np.array(1).view(NoOverrideSub)
args = _get_implementing_args([array, override_sub])
assert_equal(list(args), [override_sub, array])
args = _get_implementing_args([array, no_override_sub])
assert_equal(list(args), [no_override_sub, array])
args = _get_implementing_args(
[override_sub, no_override_sub])
assert_equal(list(args), [override_sub, no_override_sub])
def test_ndarray_and_duck_array(self):
class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
other = Other()
args = _get_implementing_args([other, array])
assert_equal(list(args), [other, array])
args = _get_implementing_args([array, other])
assert_equal(list(args), [array, other])
def test_ndarray_subclass_and_duck_array(self):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
subarray = np.array(1).view(OverrideSub)
other = Other()
assert_equal(_get_implementing_args([array, subarray, other]),
[subarray, array, other])
assert_equal(_get_implementing_args([array, other, subarray]),
[subarray, array, other])
def test_many_duck_arrays(self):
class A:
__array_function__ = _return_not_implemented
class B(A):
__array_function__ = _return_not_implemented
class C(A):
__array_function__ = _return_not_implemented
class D:
__array_function__ = _return_not_implemented
a = A()
b = B()
c = C()
d = D()
assert_equal(_get_implementing_args([1]), [])
assert_equal(_get_implementing_args([a]), [a])
assert_equal(_get_implementing_args([a, 1]), [a])
assert_equal(_get_implementing_args([a, a, a]), [a])
assert_equal(_get_implementing_args([a, d, a]), [a, d])
assert_equal(_get_implementing_args([a, b]), [b, a])
assert_equal(_get_implementing_args([b, a]), [b, a])
assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
def test_too_many_duck_arrays(self):
namespace = dict(__array_function__=_return_not_implemented)
types = [type('A' + str(i), (object,), namespace) for i in range(33)]
relevant_args = [t() for t in types]
actual = _get_implementing_args(relevant_args[:32])
assert_equal(actual, relevant_args[:32])
with assert_raises_regex(TypeError, 'distinct argument types'):
_get_implementing_args(relevant_args)
class TestNDArrayArrayFunction:
@requires_array_function
def test_method(self):
class Other:
__array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
pass
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
array = np.array([1])
other = Other()
no_override_sub = array.view(NoOverrideSub)
override_sub = array.view(OverrideSub)
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray,),
args=(array, 1.), kwargs={})
assert_equal(result, 'original')
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray, Other),
args=(array, other), kwargs={})
assert_(result is NotImplemented)
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray, NoOverrideSub),
args=(array, no_override_sub),
kwargs={})
assert_equal(result, 'original')
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray, OverrideSub),
args=(array, override_sub),
kwargs={})
assert_equal(result, 'original')
with assert_raises_regex(TypeError, 'no implementation found'):
np.concatenate((array, other))
expected = np.concatenate((array, array))
result = np.concatenate((array, no_override_sub))
assert_equal(result, expected.view(NoOverrideSub))
result = np.concatenate((array, override_sub))
assert_equal(result, expected.view(OverrideSub))
def test_no_wrapper(self):
# This shouldn't happen unless a user intentionally calls
# __array_function__ with invalid arguments, but check that we raise
# an appropriate error all the same.
array = np.array(1)
func = lambda x: x
with assert_raises_regex(AttributeError, '_implementation'):
array.__array_function__(func=func, types=(np.ndarray,),
args=(array,), kwargs={})
@requires_array_function
class TestArrayFunctionDispatch:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
roundtripped = pickle.loads(
pickle.dumps(dispatched_one_arg, protocol=proto))
assert_(roundtripped is dispatched_one_arg)
def test_name_and_docstring(self):
assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
if sys.flags.optimize < 2:
assert_equal(dispatched_one_arg.__doc__, 'Docstring.')
def test_interface(self):
class MyArray:
def __array_function__(self, func, types, args, kwargs):
return (self, func, types, args, kwargs)
original = MyArray()
(obj, func, types, args, kwargs) = dispatched_one_arg(original)
assert_(obj is original)
assert_(func is dispatched_one_arg)
assert_equal(set(types), {MyArray})
# assert_equal uses the overloaded np.iscomplexobj() internally
assert_(args == (original,))
assert_equal(kwargs, {})
def test_not_implemented(self):
class MyArray:
def __array_function__(self, func, types, args, kwargs):
return NotImplemented
array = MyArray()
with assert_raises_regex(TypeError, 'no implementation found'):
dispatched_one_arg(array)
@requires_array_function
class TestVerifyMatchingSignatures:
def test_verify_matching_signatures(self):
verify_matching_signatures(lambda x: 0, lambda x: 0)
verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda a: 0, lambda b: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda x: 0, lambda x=None: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)
def test_array_function_dispatch(self):
with assert_raises(RuntimeError):
@array_function_dispatch(lambda x: (x,))
def f(y):
pass
# should not raise
@array_function_dispatch(lambda x: (x,), verify=False)
def f(y):
pass
def _new_duck_type_and_implements():
"""Create a duck array type and implements functions."""
HANDLED_FUNCTIONS = {}
class MyArray:
def __array_function__(self, func, types, args, kwargs):
if func not in HANDLED_FUNCTIONS:
return NotImplemented
if not all(issubclass(t, MyArray) for t in types):
return NotImplemented
return HANDLED_FUNCTIONS[func](*args, **kwargs)
def implements(numpy_function):
"""Register an __array_function__ implementations."""
def decorator(func):
HANDLED_FUNCTIONS[numpy_function] = func
return func
return decorator
return (MyArray, implements)
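# Typical use of the helper above (mirrored by the tests that follow),
# assuming __array_function__ dispatch is enabled; the np.concatenate
# override is purely illustrative:
#
#     MyArray, implements = _new_duck_type_and_implements()
#
#     @implements(np.concatenate)
#     def _(arrays):
#         return 'myarray-concatenate'
#
#     np.concatenate([MyArray()])  # -> 'myarray-concatenate'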
@requires_array_function
class TestArrayFunctionImplementation:
def test_one_arg(self):
MyArray, implements = _new_duck_type_and_implements()
@implements(dispatched_one_arg)
def _(array):
return 'myarray'
assert_equal(dispatched_one_arg(1), 'original')
assert_equal(dispatched_one_arg(MyArray()), 'myarray')
def test_optional_args(self):
MyArray, implements = _new_duck_type_and_implements()
@array_function_dispatch(lambda array, option=None: (array,))
def func_with_option(array, option='default'):
return option
@implements(func_with_option)
def my_array_func_with_option(array, new_option='myarray'):
return new_option
# we don't need to implement every option on __array_function__
# implementations
assert_equal(func_with_option(1), 'default')
assert_equal(func_with_option(1, option='extra'), 'extra')
assert_equal(func_with_option(MyArray()), 'myarray')
with assert_raises(TypeError):
func_with_option(MyArray(), option='extra')
# but new options on implementations can't be used
result = my_array_func_with_option(MyArray(), new_option='yes')
assert_equal(result, 'yes')
with assert_raises(TypeError):
func_with_option(MyArray(), new_option='no')
def test_not_implemented(self):
MyArray, implements = _new_duck_type_and_implements()
@array_function_dispatch(lambda array: (array,), module='my')
def func(array):
return array
array = np.array(1)
assert_(func(array) is array)
assert_equal(func.__module__, 'my')
with assert_raises_regex(
TypeError, "no implementation found for 'my.func'"):
func(MyArray())
class TestNDArrayMethods:
def test_repr(self):
# gh-12162: should still be defined even if __array_function__ doesn't
# implement np.array_repr()
class MyArray(np.ndarray):
def __array_function__(*args, **kwargs):
return NotImplemented
array = np.array(1).view(MyArray)
assert_equal(repr(array), 'MyArray(1)')
assert_equal(str(array), '1')
class TestNumPyFunctions:
def test_set_module(self):
assert_equal(np.sum.__module__, 'numpy')
assert_equal(np.char.equal.__module__, 'numpy.char')
assert_equal(np.fft.fft.__module__, 'numpy.fft')
assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
def test_inspect_sum(self):
signature = inspect.signature(np.sum)
assert_('axis' in signature.parameters)
@requires_array_function
def test_override_sum(self):
MyArray, implements = _new_duck_type_and_implements()
@implements(np.sum)
def _(array):
return 'yes'
assert_equal(np.sum(MyArray()), 'yes')
@requires_array_function
def test_sum_on_mock_array(self):
# We need a proxy for mocks because __array_function__ is only looked
# up in the class dict
class ArrayProxy:
def __init__(self, value):
self.value = value
def __array_function__(self, *args, **kwargs):
return self.value.__array_function__(*args, **kwargs)
def __array__(self, *args, **kwargs):
return self.value.__array__(*args, **kwargs)
proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
proxy.value.__array_function__.return_value = 1
result = np.sum(proxy)
assert_equal(result, 1)
proxy.value.__array_function__.assert_called_once_with(
np.sum, (ArrayProxy,), (proxy,), {})
proxy.value.__array__.assert_not_called()
@requires_array_function
def test_sum_forwarding_implementation(self):
class MyArray(np.ndarray):
def sum(self, axis, out):
return 'summed'
def __array_function__(self, func, types, args, kwargs):
return super().__array_function__(func, types, args, kwargs)
# note: the internal implementation of np.sum() calls the .sum() method
array = np.array(1).view(MyArray)
assert_equal(np.sum(array), 'summed')
class TestArrayLike:
def setup_method(self):
class MyArray():
def __init__(self, function=None):
self.function = function
def __array_function__(self, func, types, args, kwargs):
assert func is getattr(np, func.__name__)
try:
my_func = getattr(self, func.__name__)
except AttributeError:
return NotImplemented
return my_func(*args, **kwargs)
self.MyArray = MyArray
class MyNoArrayFunctionArray():
def __init__(self, function=None):
self.function = function
self.MyNoArrayFunctionArray = MyNoArrayFunctionArray
def add_method(self, name, arr_class, enable_value_error=False):
def _definition(*args, **kwargs):
# Check that `like=` isn't propagated downstream
assert 'like' not in kwargs
if enable_value_error and 'value_error' in kwargs:
raise ValueError
return arr_class(getattr(arr_class, name))
setattr(arr_class, name, _definition)
def func_args(*args, **kwargs):
return args, kwargs
@requires_array_function
def test_array_like_not_implemented(self):
self.add_method('array', self.MyArray)
ref = self.MyArray.array()
with assert_raises_regex(TypeError, 'no implementation found'):
array_like = np.asarray(1, like=ref)
_array_tests = [
('array', *func_args((1,))),
('asarray', *func_args((1,))),
('asanyarray', *func_args((1,))),
('ascontiguousarray', *func_args((2, 3))),
('asfortranarray', *func_args((2, 3))),
('require', *func_args((np.arange(6).reshape(2, 3),),
requirements=['A', 'F'])),
('empty', *func_args((1,))),
('full', *func_args((1,), 2)),
('ones', *func_args((1,))),
('zeros', *func_args((1,))),
('arange', *func_args(3)),
('frombuffer', *func_args(b'\x00' * 8, dtype=int)),
('fromiter', *func_args(range(3), dtype=int)),
('fromstring', *func_args('1,2', dtype=int, sep=',')),
('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))),
('genfromtxt', *func_args(lambda: StringIO(u'1,2.1'),
dtype=[('int', 'i8'), ('float', 'f8')],
delimiter=',')),
]
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('numpy_ref', [True, False])
@requires_array_function
def test_array_like(self, function, args, kwargs, numpy_ref):
self.add_method('array', self.MyArray)
self.add_method(function, self.MyArray)
np_func = getattr(np, function)
my_func = getattr(self.MyArray, function)
if numpy_ref is True:
ref = np.array(1)
else:
ref = self.MyArray.array()
like_args = tuple(a() if callable(a) else a for a in args)
array_like = np_func(*like_args, **kwargs, like=ref)
if numpy_ref is True:
assert type(array_like) is np.ndarray
np_args = tuple(a() if callable(a) else a for a in args)
np_arr = np_func(*np_args, **kwargs)
# Special-case np.empty to ensure values match
if function == "empty":
np_arr.fill(1)
array_like.fill(1)
assert_equal(array_like, np_arr)
else:
assert type(array_like) is self.MyArray
assert array_like.function is my_func
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"])
@requires_array_function
def test_no_array_function_like(self, function, args, kwargs, ref):
self.add_method('array', self.MyNoArrayFunctionArray)
self.add_method(function, self.MyNoArrayFunctionArray)
np_func = getattr(np, function)
# Instantiate ref if it's the MyNoArrayFunctionArray class
if ref == "MyNoArrayFunctionArray":
ref = self.MyNoArrayFunctionArray.array()
like_args = tuple(a() if callable(a) else a for a in args)
with assert_raises_regex(TypeError,
'The `like` argument must be an array-like that implements'):
np_func(*like_args, **kwargs, like=ref)
@pytest.mark.parametrize('numpy_ref', [True, False])
def test_array_like_fromfile(self, numpy_ref):
self.add_method('array', self.MyArray)
self.add_method("fromfile", self.MyArray)
if numpy_ref is True:
ref = np.array(1)
else:
ref = self.MyArray.array()
data = np.random.random(5)
with tempfile.TemporaryDirectory() as tmpdir:
fname = os.path.join(tmpdir, "testfile")
data.tofile(fname)
array_like = np.fromfile(fname, like=ref)
if numpy_ref is True:
assert type(array_like) is np.ndarray
np_res = np.fromfile(fname, like=ref)
assert_equal(np_res, data)
assert_equal(array_like, np_res)
else:
assert type(array_like) is self.MyArray
assert array_like.function is self.MyArray.fromfile
@requires_array_function
def test_exception_handling(self):
self.add_method('array', self.MyArray, enable_value_error=True)
ref = self.MyArray.array()
with assert_raises(TypeError):
# Raises the error about `value_error` being invalid first
np.array(1, value_error=True, like=ref)
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_dtype.py
import sys
import operator
import pytest
import ctypes
import gc
import types
from typing import Any
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT,
IS_PYSTON)
from numpy.compat import pickle
from itertools import permutations
import random
import hypothesis
from hypothesis.extra import numpy as hynp
def assert_dtype_equal(a, b):
assert_equal(a, b)
assert_equal(hash(a), hash(b),
"two equivalent types do not hash to the same value !")
def assert_dtype_not_equal(a, b):
assert_(a != b)
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
        # Make sure equivalent byte order chars hash the same (e.g. < and = on
        # little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
def test_richcompare_invalid_dtype_equality(self):
        # Make sure objects that cannot be converted to valid
        # dtypes result in False/True when compared to valid dtypes.
        # Here 7 cannot be converted to a dtype. No exceptions should be raised
assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="
@pytest.mark.parametrize(
'operation',
[operator.le, operator.lt, operator.ge, operator.gt])
def test_richcompare_invalid_dtype_comparison(self, operation):
# Make sure TypeError is raised for comparison operators
# for invalid dtypes. Here 7 is an invalid dtype.
with pytest.raises(TypeError):
operation(np.dtype(np.int32), 7)
@pytest.mark.parametrize("dtype",
['Bool', 'Bytes0', 'Complex32', 'Complex64',
'Datetime64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64',
'Object0', 'Str0', 'Timedelta64',
'UInt8', 'UInt16', 'Uint32', 'UInt32',
'Uint64', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
def test_dtype_from_bytes(self):
# Empty bytes object
assert_raises(TypeError, np.dtype, b'')
# Byte order indicator, but no type
assert_raises(TypeError, np.dtype, b'|')
# Single character with ordinal < NPY_NTYPES returns
# type by index into _builtin_descrs
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
# Single character where value is a valid type code
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
# Bytes with non-ascii values raise errors
assert_raises(TypeError, np.dtype, b'\xff')
assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['i4', 'f4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
        # This is a safe cast (not equiv) due to the different names:
assert np.can_cast(x, y, casting="safe")
class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
        # In theory, they may hash the same (collision)?
a = np.dtype([('yo', int)])
b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
        # In theory, they may hash the same (collision)?
a = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount_dictionary_setting(self):
names = ["name1"]
formats = ["f8"]
titles = ["t1"]
offsets = [0]
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
np.dtype(d)
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
assert refcounts == refcounts_new
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
assert_dtype_equal(a, c)
assert_dtype_not_equal(a, b)
state = b.__reduce__()[2]
a.__setstate__(state)
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
dt = np.dtype('i4, i1', align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i4', 'u1'],
'offsets':[0, 4]}, align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
assert_equal(dt.itemsize, 8)
# Nesting should preserve that alignment
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=True)
assert_equal(dt1.itemsize, 20)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 16]}, align=True)
assert_equal(dt2.itemsize, 20)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 16)}, align=True)
assert_equal(dt3.itemsize, 20)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Nesting should preserve packing
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=False)
assert_equal(dt1.itemsize, 11)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 10]}, align=False)
assert_equal(dt2.itemsize, 11)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 10)}, align=False)
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Array of subtype should preserve alignment
dt1 = np.dtype([('a', '|i1'),
('b', [('f0', '<i2'),
('f1', '<f4')], 2)], align=True)
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
def test_union_struct(self):
# Should be able to create union dtypes
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[0, 0, 2]}, align=True)
assert_equal(dt.itemsize, 4)
a = np.array([3], dtype='<u4').view(dt)
a['f1'] = 10
a['f2'] = 36
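        # f1 overlays the low 16 bits of f0 and f2 the high 16 bits, so on
        # little endian f0 == f1 + f2 * 2**16 == 10 + 36*256*256.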
assert_equal(a['f0'], 10 + 36*256*256)
# Should be able to specify fields out of order
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
# field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
assert_equal(b.astype(dt), a)
assert_equal(a.view(dt2), b)
assert_equal(b.view(dt), a)
# Should not be able to overlap objects with other types
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['O', 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'O'],
'offsets':[0, 3]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':[[('a', 'O')], 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', [('a', 'O')]],
'offsets':[0, 3]})
# Out of order should still be ok, however
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i1', 'O'],
'offsets':[np.dtype('intp').itemsize, 0]})
@pytest.mark.parametrize(["obj", "dtype", "expected"],
[([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
(3, "(3)f4,", [3, 3, 3]),
(np.float64(2), "(2)f4,", [2, 2]),
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
(["1", "2"], "(2)i,", None)])
def test_subarray_list(self, obj, dtype, expected):
dtype = np.dtype(dtype)
res = np.array(obj, dtype=dtype)
if expected is None:
# iterate the 1-d list to fill the array
expected = np.empty(len(obj), dtype=dtype)
for i in range(len(expected)):
expected[i] = obj[i]
assert_array_equal(res, expected)
def test_comma_datetime(self):
dt = np.dtype('M8[D],datetime64[Y],i8')
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
('f1', 'datetime64[Y]'),
('f2', 'i8')]))
def test_from_dictproxy(self):
# Tests for PR #5920
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
assert_dtype_equal(dt, np.dtype(dt.fields))
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
def test_from_dict_with_zero_width_field(self):
# Regression test for #6430 / #2196
dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
dt2 = np.dtype({'names': ['val1', 'val2'],
'formats': [(np.float32, (0,)), int]})
assert_dtype_equal(dt, dt2)
assert_equal(dt.fields['val1'][0].itemsize, 0)
assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
def test_bool_commastring(self):
d = np.dtype('?,?,?') # raises?
assert_equal(len(d.names), 3)
for n in d.names:
assert_equal(d.fields[n][0], np.dtype('?'))
def test_nonint_offsets(self):
# gh-8059
def make_dtype(off):
return np.dtype({'names': ['A'], 'formats': ['i4'],
'offsets': [off]})
assert_raises(TypeError, make_dtype, 'ASD')
assert_raises(OverflowError, make_dtype, 2**70)
assert_raises(TypeError, make_dtype, 2.3)
assert_raises(ValueError, make_dtype, -10)
# no errors here:
dt = make_dtype(np.uint32(0))
np.zeros(1, dtype=dt)[0].item()
def test_fields_by_index(self):
dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
assert_dtype_equal(dt[0], np.dtype(np.int8))
assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
assert_dtype_equal(dt[-1], dt[1])
assert_dtype_equal(dt[-2], dt[0])
assert_raises(IndexError, lambda: dt[-3])
assert_raises(TypeError, operator.getitem, dt, 3.0)
assert_equal(dt[1], dt[np.int8(1)])
@pytest.mark.parametrize('align_flag',[False, True])
def test_multifield_index(self, align_flag):
# indexing with a list produces subfields
# the align flag should be preserved
dt = np.dtype([
(('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
], align=align_flag)
dt_sub = dt[['B', 'col1']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B', 'col1'],
'formats': ['<f8', '<U20'],
'offsets': [88, 0],
'titles': [None, 'title'],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[['B']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B'],
'formats': ['<f8'],
'offsets': [88],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[[]]
assert_equal(
dt_sub,
np.dtype({
'names': [],
'formats': [],
'offsets': [],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
assert_raises(TypeError, operator.getitem, dt, ())
assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
assert_raises(KeyError, operator.getitem, dt, ['fake'])
assert_raises(KeyError, operator.getitem, dt, ['title'])
assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
def test_partial_dict(self):
# 'names' is missing
assert_raises(ValueError, np.dtype,
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
def test_fieldless_views(self):
a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
'itemsize':8})
assert_raises(ValueError, a.view, np.dtype([]))
d = np.dtype((np.dtype([]), 10))
assert_equal(d.shape, (10,))
assert_equal(d.itemsize, 0)
assert_equal(d.base, np.dtype([]))
arr = np.fromiter((() for i in range(10)), [])
assert_equal(arr.dtype, np.dtype([]))
assert_raises(ValueError, np.frombuffer, b'', dtype=[])
assert_equal(np.frombuffer(b'', dtype=[], count=2),
np.empty(2, dtype=[]))
assert_raises(ValueError, np.dtype, ([], 'f8'))
assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
np.ones(2, dtype=bool))
assert_equal(np.zeros((1, 2), dtype=[]) == a,
np.ones((1, 2), dtype=bool))
class TestSubarray:
def test_single_subarray(self):
a = np.dtype((int, (2)))
b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
assert_equal(type(b.subdtype[1]), tuple)
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (1, 2, 3)))
b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
# FutureWarning during deprecation period; after it is passed this
# should instead check that "(1)f8" == "1f8" == ("f8", 1).
with pytest.warns(FutureWarning):
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
assert_dtype_equal(np.dtype(d), np.dtype(d))
def test_shape_simple(self):
"""Test some simple cases that shouldn't be equal"""
assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
def test_shape_monster(self):
"""Test some more complicated cases that shouldn't be equal"""
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
def test_shape_sequence(self):
# Any sequence of integers should work as shape, but the result
# should be a tuple (immutable) of base type integers.
a = np.array([1, 2, 3], dtype=np.int16)
l = [1, 2, 3]
# Array gets converted
dt = np.dtype([('a', 'f4', a)])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
# List gets converted
dt = np.dtype([('a', 'f4', l)])
assert_(isinstance(dt['a'].shape, tuple))
#
class IntLike:
def __index__(self):
return 3
def __int__(self):
# (a PyNumber_Check fails without __int__)
return 3
dt = np.dtype([('a', 'f4', IntLike())])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
dt = np.dtype([('a', 'f4', (IntLike(),))])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
def test_shape_matches_ndim(self):
dt = np.dtype([('a', 'f4', ())])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4')])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4', 4)])
assert_equal(dt['a'].shape, (4,))
assert_equal(dt['a'].ndim, 1)
dt = np.dtype([('a', 'f4', (1, 2, 3))])
assert_equal(dt['a'].shape, (1, 2, 3))
assert_equal(dt['a'].ndim, 3)
def test_shape_invalid(self):
# Check that the shape is valid.
max_int = np.iinfo(np.intc).max
max_intp = np.iinfo(np.intp).max
# Too large values (the datatype is part of this)
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
        # Takes a different code path (fails earlier):
assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
# Negative values
assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
def test_alignment(self):
        # Check that subarrays are aligned
t1 = np.dtype('(1,)i4', align=True)
t2 = np.dtype('2i4', align=True)
assert_equal(t1.alignment, t2.alignment)
def test_aligned_empty(self):
# Mainly regression test for gh-19696: construction failed completely
dt = np.dtype([], align=True)
assert dt == np.dtype([])
dt = np.dtype({"names": [], "formats": [], "itemsize": 0}, align=True)
assert dt == np.dtype([])
def test_subarray_base_item(self):
arr = np.ones(3, dtype=[("f", "i", 3)])
# Extracting the field "absorbs" the subarray into a view:
assert arr["f"].base is arr
# Extract the structured item, and then check the tuple component:
item = arr.item(0)
assert type(item) is tuple and len(item) == 1
assert item[0].base is arr
def test_subarray_cast_copies(self):
        # Older versions of NumPy did NOT copy, but they got the ownership
        # wrong (not actually knowing the correct base!). Versions since 1.21
        # (I think) crashed fairly reliably. This test defines the correct
        # behavior as a copy. Keeping the ownership would be possible (but
        # harder).
arr = np.ones(3, dtype=[("f", "i", 3)])
cast = arr.astype(object)
for fields in cast:
assert type(fields) == tuple and len(fields) == 1
subarr = fields[0]
assert subarr.base is None
assert subarr.flags.owndata
def iter_struct_object_dtypes():
"""
    Iterates over a few complex dtypes and object patterns which
    fill the array with a given object (defaults to a singleton).
Yields
------
dtype : dtype
pattern : tuple
Structured tuple for use with `np.array`.
count : int
Number of objects stored in the dtype.
singleton : object
A singleton object. The returned pattern is constructed so that
all objects inside the datatype are set to the singleton.
"""
obj = object()
dt = np.dtype([('b', 'O', (2, 3))])
p = ([[obj] * 3] * 2,)
yield pytest.param(dt, p, 6, obj, id="<subarray>")
dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
p = (0, [[obj] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
p = (0, [[(obj, 0)] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
p = (0, [[(obj, obj)] * 3] * 2)
yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestStructuredObjectRefcounting:
"""These tests cover various uses of complicated structured types which
include objects and thus require reference counting.
"""
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(["creation_func", "creation_obj"], [
pytest.param(np.empty, None,
# None is probably used for too many things
marks=pytest.mark.skip("unreliable due to python's behaviour")),
(np.ones, 1),
(np.zeros, 0)])
def test_structured_object_create_delete(self, dt, pat, count, singleton,
creation_func, creation_obj):
"""Structured object reference counting in creation and deletion"""
# The test assumes that 0, 1, and None are singletons.
gc.collect()
before = sys.getrefcount(creation_obj)
arr = creation_func(3, dt)
now = sys.getrefcount(creation_obj)
assert now - before == count * 3
del arr
now = sys.getrefcount(creation_obj)
assert now == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_item_setting(self, dt, pat, count, singleton):
"""Structured object reference counting for simple item setting"""
one = 1
gc.collect()
before = sys.getrefcount(singleton)
arr = np.array([pat] * 3, dt)
assert sys.getrefcount(singleton) - before == count * 3
# Fill with `1` and check that it was replaced correctly:
before2 = sys.getrefcount(one)
arr[...] = one
after2 = sys.getrefcount(one)
assert after2 - before2 == count * 3
del arr
gc.collect()
assert sys.getrefcount(one) == before2
assert sys.getrefcount(singleton) == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(
['shape', 'index', 'items_changed'],
[((3,), ([0, 2],), 2),
((3, 2), ([0, 2], slice(None)), 4),
((3, 2), ([0, 2], [1]), 2),
((3,), ([True, False, True]), 2)])
def test_structured_object_indexing(self, shape, index, items_changed,
dt, pat, count, singleton):
"""Structured object reference counting for advanced indexing."""
        # Use two small negative values (should be singletons, but less likely
        # to run into race-conditions). This failed in some threaded envs
        # when using 0 and 1. If it fails again, we should remove all explicit
        # checks, and rely on the `pytest-leaks` reference count checker only.
val0 = -4
val1 = -5
arr = np.full(shape, val0, dt)
gc.collect()
before_val0 = sys.getrefcount(val0)
before_val1 = sys.getrefcount(val1)
# Test item getting:
part = arr[index]
after_val0 = sys.getrefcount(val0)
assert after_val0 - before_val0 == count * items_changed
del part
# Test item setting:
arr[index] = val1
gc.collect()
after_val0 = sys.getrefcount(val0)
after_val1 = sys.getrefcount(val1)
assert before_val0 - after_val0 == count * items_changed
assert after_val1 - before_val1 == count * items_changed
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
"""Structured object reference counting for specialized functions.
        The older functions such as take and repeat use different code paths
        than item setting (at the time of writing).
"""
indices = [0, 1]
arr = np.array([pat] * 3, dt)
gc.collect()
before = sys.getrefcount(singleton)
res = arr.take(indices)
after = sys.getrefcount(singleton)
assert after - before == count * 2
new = res.repeat(10)
gc.collect()
after_repeat = sys.getrefcount(singleton)
assert after_repeat - after == count * 2 * 10
class TestStructuredDtypeSparseFields:
"""Tests subarray fields which contain sparse dtypes so that
not all memory is used by the dtype work. Such dtype's should
leave the underlying memory unchanged.
"""
dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
'offsets':[0, 4]}, (2, 3))])
sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
'offsets':[4]}, (2, 3))])
def test_sparse_field_assignment(self):
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[...] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
def test_sparse_field_assignment_fancy(self):
# Fancy assignment goes to the copyswap function for complex types:
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
class TestMonsterType:
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
a = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
b = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
assert_dtype_equal(a, b)
c = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
d = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_list_recursion(self):
l = list()
l.append(('f', l))
with pytest.raises(RecursionError):
np.dtype(l)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_tuple_recursion(self):
d = np.int32
for i in range(100000):
d = (d, (1,))
with pytest.raises(RecursionError):
np.dtype(d)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_dict_recursion(self):
d = dict(names=['self'], formats=[None], offsets=[0])
d['formats'][0] = d
with pytest.raises(RecursionError):
np.dtype(d)
class TestMetadata:
def test_no_metadata(self):
d = np.dtype(int)
assert_(d.metadata is None)
def test_metadata_takes_dict(self):
d = np.dtype(int, metadata={'datum': 1})
assert_(d.metadata == {'datum': 1})
def test_metadata_rejects_nondict(self):
assert_raises(TypeError, np.dtype, int, metadata='datum')
assert_raises(TypeError, np.dtype, int, metadata=1)
assert_raises(TypeError, np.dtype, int, metadata=None)
def test_nested_metadata(self):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
assert_(d['a'].metadata == {'datum': 1})
def test_base_metadata_copied(self):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
assert_(d.metadata == {'datum': 1})
class TestString:
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(str(dt),
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])]")
# If the sticky aligned flag is set to True, it makes the
# str() function use a dict representation with an 'aligned' flag
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))],
(3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])],
align=True)
assert_equal(str(dt),
"{'names': ['top', 'bottom'],"
" 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"[('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))]],"
" 'offsets': [0, 76800],"
" 'itemsize': 80000,"
" 'aligned': True}")
with np.printoptions(legacy='1.21'):
assert_equal(str(dt),
"{'names':['top','bottom'], "
"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)),"
"[('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))]], "
"'offsets':[0,76800], "
"'itemsize':80000, "
"'aligned':True}")
assert_equal(np.dtype(eval(str(dt))), dt)
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"[(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')]")
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names': ['rgba', 'r', 'g', 'b'],"
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
" 'offsets': [0, 0, 1, 2],"
" 'titles': ['Color', 'Red pixel', "
"'Green pixel', 'Blue pixel'],"
" 'itemsize': 4}")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names': ['r', 'b'],"
" 'formats': ['u1', 'u1'],"
" 'offsets': [0, 2],"
" 'titles': ['Red pixel', 'Blue pixel'],"
" 'itemsize': 3}")
dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
assert_equal(str(dt),
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
def test_repr_structured(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(repr(dt),
"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])])")
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
align=True)
assert_equal(repr(dt),
"dtype([(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')], align=True)")
def test_repr_structured_not_packed(self):
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']}, align=True)
assert_equal(repr(dt),
"dtype({'names': ['rgba', 'r', 'g', 'b'],"
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
" 'offsets': [0, 0, 1, 2],"
" 'titles': ['Color', 'Red pixel', "
"'Green pixel', 'Blue pixel'],"
" 'itemsize': 4}, align=True)")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel'],
'itemsize': 4})
assert_equal(repr(dt),
"dtype({'names': ['r', 'b'], "
"'formats': ['u1', 'u1'], "
"'offsets': [0, 2], "
"'titles': ['Red pixel', 'Blue pixel'], "
"'itemsize': 4})")
def test_repr_structured_datetime(self):
dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
assert_equal(repr(dt),
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
def test_repr_str_subarray(self):
dt = np.dtype(('<i2', (1,)))
assert_equal(repr(dt), "dtype(('<i2', (1,)))")
assert_equal(str(dt), "('<i2', (1,))")
def test_base_dtype_with_object_type(self):
# Issue gh-2798, should not error.
np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
def test_empty_string_to_object(self):
# Pull request #4722
np.array(["", ""]).astype(object)
def test_void_subclass_unsized(self):
dt = np.dtype(np.record)
assert_equal(repr(dt), "dtype('V')")
assert_equal(str(dt), '|V0')
assert_equal(dt.name, 'record')
def test_void_subclass_sized(self):
dt = np.dtype((np.record, 2))
assert_equal(repr(dt), "dtype('V2')")
assert_equal(str(dt), '|V2')
assert_equal(dt.name, 'record16')
def test_void_subclass_fields(self):
dt = np.dtype((np.record, [('a', '<u2')]))
assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
assert_equal(dt.name, 'record16')
class TestDtypeAttributeDeletion:
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
attr = ["subdtype", "descr", "str", "name", "base", "shape",
"isbuiltin", "isnative", "isalignedstruct", "fields",
"metadata", "hasobject"]
for s in attr:
assert_raises(AttributeError, delattr, dt, s)
def test_dtype_writable_attributes_deletion(self):
dt = np.dtype(np.double)
attr = ["names"]
for s in attr:
assert_raises(AttributeError, delattr, dt, s)
class TestDtypeAttributes:
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
'names': ['A', 'B'],
'formats': ['f4', 'f4'],
'offsets': [0, 8],
'itemsize': 16})
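        # Note: bytes 4-8 and 12-16 are padding; dtype.descr should spell
        # them out as unnamed void fields so that the 16-byte itemsize
        # survives the round trip.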
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
def test_name_dtype_subclass(self):
# Ticket #4357
class user_def_subcls(np.void):
pass
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
def test_zero_stride(self):
arr = np.ones(1, dtype="i8")
arr = np.broadcast_to(arr, 10)
assert arr.strides == (0,)
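        # Changing to a differently-sized dtype needs a contiguous last
        # axis, which a zero-strided (broadcast) view cannot provide: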
with pytest.raises(ValueError):
arr.dtype = "i1"
class TestDTypeMakeCanonical:
def check_canonical(self, dtype, canonical):
"""
Check most properties relevant to "canonical" versions of a dtype,
which is mainly native byte order for datatypes supporting this.
The main work is checking structured dtypes with fields, where we
        reproduce most of the actual logic used in the C-code.
"""
assert type(dtype) is type(canonical)
# a canonical DType should always have equivalent casting (both ways)
assert np.can_cast(dtype, canonical, casting="equiv")
assert np.can_cast(canonical, dtype, casting="equiv")
# a canonical dtype (and its fields) is always native (checks fields):
assert canonical.isnative
# Check that canonical of canonical is the same (no casting):
assert np.result_type(canonical) == canonical
if not dtype.names:
# The flags currently never change for unstructured dtypes
assert dtype.flags == canonical.flags
return
        # Must have the "needs API" (NPY_NEEDS_PYAPI) flag set:
assert dtype.flags & 0b10000
# Check that the fields are identical (including titles):
assert dtype.fields.keys() == canonical.fields.keys()
def aligned_offset(offset, alignment):
# round up offset:
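            # e.g. aligned_offset(5, 4) == 8: -(-5 // 4) is ceil(5 / 4) == 2
            # (the usual ceil-division idiom), then 2 * 4 == 8.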
            return -(-offset // alignment) * alignment
totalsize = 0
max_alignment = 1
for name in dtype.names:
# each field is also canonical:
new_field_descr = canonical.fields[name][0]
self.check_canonical(dtype.fields[name][0], new_field_descr)
# Must have the "inherited" object related flags:
expected = 0b11011 & new_field_descr.flags
assert (canonical.flags & expected) == expected
if canonical.isalignedstruct:
totalsize = aligned_offset(totalsize, new_field_descr.alignment)
max_alignment = max(new_field_descr.alignment, max_alignment)
assert canonical.fields[name][1] == totalsize
# if a title exists, they must match (otherwise empty tuple):
assert dtype.fields[name][2:] == canonical.fields[name][2:]
totalsize += new_field_descr.itemsize
if canonical.isalignedstruct:
totalsize = aligned_offset(totalsize, max_alignment)
assert canonical.itemsize == totalsize
assert canonical.alignment == max_alignment
def test_simple(self):
dt = np.dtype(">i4")
assert np.result_type(dt).isnative
assert np.result_type(dt).num == dt.num
# dtype with empty space:
struct_dt = np.dtype(">i4,<i1,i8,V3")[["f0", "f2"]]
canonical = np.result_type(struct_dt)
assert canonical.itemsize == 4+8
assert canonical.isnative
# aligned struct dtype with empty space:
struct_dt = np.dtype(">i1,<i4,i8,V3", align=True)[["f0", "f2"]]
canonical = np.result_type(struct_dt)
assert canonical.isalignedstruct
assert canonical.itemsize == np.dtype("i8").alignment + 8
assert canonical.isnative
def test_object_flag_not_inherited(self):
        # The following dtype still indicates "object", because it is
        # included in the inaccessible space (maybe this could change at
        # some point):
arr = np.ones(3, "i,O,i")[["f0", "f2"]]
assert arr.dtype.hasobject
canonical_dt = np.result_type(arr.dtype)
assert not canonical_dt.hasobject
@pytest.mark.slow
@hypothesis.given(dtype=hynp.nested_dtypes())
def test_make_canonical_hypothesis(self, dtype):
canonical = np.result_type(dtype)
self.check_canonical(dtype, canonical)
# result_type with two arguments should always give identical results:
two_arg_result = np.result_type(dtype, dtype)
assert np.can_cast(two_arg_result, canonical, casting="no")
@pytest.mark.slow
@hypothesis.given(
dtype=hypothesis.extra.numpy.array_dtypes(
subtype_strategy=hypothesis.extra.numpy.array_dtypes(),
min_size=5, max_size=10, allow_subarrays=True))
def test_structured(self, dtype):
# Pick 4 of the fields at random. This will leave empty space in the
# dtype (since we do not canonicalize it here).
field_subset = random.sample(dtype.names, k=4)
dtype_with_empty_space = dtype[field_subset]
assert dtype_with_empty_space.itemsize == dtype.itemsize
canonicalized = np.result_type(dtype_with_empty_space)
self.check_canonical(dtype_with_empty_space, canonicalized)
# promotion with two arguments should always give identical results:
two_arg_result = np.promote_types(
dtype_with_empty_space, dtype_with_empty_space)
assert np.can_cast(two_arg_result, canonicalized, casting="no")
        # Ensure that we also check the aligned struct (check the opposite,
        # in case hypothesis grows support for `align`), then repeat the
        # test:
dtype_aligned = np.dtype(dtype.descr, align=not dtype.isalignedstruct)
dtype_with_empty_space = dtype_aligned[field_subset]
assert dtype_with_empty_space.itemsize == dtype_aligned.itemsize
canonicalized = np.result_type(dtype_with_empty_space)
self.check_canonical(dtype_with_empty_space, canonicalized)
# promotion with two arguments should always give identical results:
two_arg_result = np.promote_types(
dtype_with_empty_space, dtype_with_empty_space)
assert np.can_cast(two_arg_result, canonicalized, casting="no")
class TestPickling:
def check_pickling(self, dtype):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
buf = pickle.dumps(dtype, proto)
            # The dtype pickling itself pickles `np.dtype`; if it is pickled
            # as a singleton, "dtype" should be stored in the buffer:
assert b"_DType_reconstruct" not in buf
assert b"dtype" in buf
pickled = pickle.loads(buf)
assert_equal(pickled, dtype)
assert_equal(pickled.descr, dtype.descr)
if dtype.metadata is not None:
assert_equal(pickled.metadata, dtype.metadata)
# Check the reconstructed dtype is functional
x = np.zeros(3, dtype=dtype)
y = np.zeros(3, dtype=pickled)
assert_equal(x, y)
assert_equal(x[0], y[0])
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode, bool])
def test_builtin(self, t):
self.check_pickling(np.dtype(t))
def test_structured(self):
dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
self.check_pickling(dt)
def test_structured_aligned(self):
dt = np.dtype('i4, i1', align=True)
self.check_pickling(dt)
def test_structured_unaligned(self):
dt = np.dtype('i4, i1', align=False)
self.check_pickling(dt)
def test_structured_padded(self):
dt = np.dtype({
'names': ['A', 'B'],
'formats': ['f4', 'f4'],
'offsets': [0, 8],
'itemsize': 16})
self.check_pickling(dt)
def test_structured_titles(self):
dt = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
self.check_pickling(dt)
@pytest.mark.parametrize('base', ['m8', 'M8'])
@pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
'ms', 'us', 'ns', 'ps', 'fs', 'as'])
def test_datetime(self, base, unit):
dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
self.check_pickling(dt)
if unit:
dt = np.dtype('%s[7%s]' % (base, unit))
self.check_pickling(dt)
def test_metadata(self):
dt = np.dtype(int, metadata={'datum': 1})
self.check_pickling(dt)
@pytest.mark.parametrize("DType",
[type(np.dtype(t)) for t in np.typecodes['All']] +
[np.dtype(rational), np.dtype])
def test_pickle_types(self, DType):
# Check that DTypes (the classes/types) roundtrip when pickling
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
roundtrip_DType = pickle.loads(pickle.dumps(DType, proto))
assert roundtrip_DType is DType
class TestPromotion:
"""Test cases related to more complex DType promotions. Further promotion
tests are defined in `test_numeric.py`
"""
@pytest.mark.parametrize(["other", "expected"],
[(2**16-1, np.complex64),
(2**32-1, np.complex128),
(np.float16(2), np.complex64),
(np.float32(2), np.complex64),
(np.longdouble(2), np.complex64),
# Base of the double value to sidestep any rounding issues:
(np.longdouble(np.nextafter(1.7e308, 0.)), np.complex128),
# Additionally use "nextafter" so the cast can't round down:
(np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
# repeat for complex scalars:
(np.complex64(2), np.complex64),
(np.clongdouble(2), np.complex64),
# Base of the double value to sidestep any rounding issues:
(np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), np.complex128),
# Additionally use "nextafter" so the cast can't round down:
(np.clongdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
])
def test_complex_other_value_based(self, other, expected):
# This would change if we modify the value based promotion
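        # (e.g. 2**16 - 1 still fits the 24-bit float32 mantissa exactly, so
        # complex64 suffices, while 2**32 - 1 forces the float64-based
        # complex128.)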
min_complex = np.dtype(np.complex64)
res = np.result_type(other, min_complex)
assert res == expected
# Check the same for a simple ufunc call that uses the same logic:
res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype
assert res == expected
@pytest.mark.parametrize(["other", "expected"],
[(np.bool_, np.complex128),
(np.int64, np.complex128),
(np.float16, np.complex64),
(np.float32, np.complex64),
(np.float64, np.complex128),
(np.longdouble, np.clongdouble),
(np.complex64, np.complex64),
(np.complex128, np.complex128),
(np.clongdouble, np.clongdouble),
])
def test_complex_scalar_value_based(self, other, expected):
# This would change if we modify the value based promotion
complex_scalar = 1j
res = np.result_type(other, complex_scalar)
assert res == expected
# Check the same for a simple ufunc call that uses the same logic:
res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype
assert res == expected
def test_complex_pyscalar_promote_rational(self):
with pytest.raises(TypeError,
match=r".* do not have a common DType"):
np.result_type(1j, rational)
with pytest.raises(TypeError,
match=r".* no common DType exists for the given inputs"):
np.result_type(1j, rational(1, 2))
@pytest.mark.parametrize("val", [2, 2**32, 2**63, 2**64, 2*100])
def test_python_integer_promotion(self, val):
        # If we only pass scalars (mainly python ones!), the result must take
# into account that the integer may be considered int32, int64, uint64,
# or object depending on the input value. So test those paths!
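        # Illustrative values (on a typical 64-bit Linux platform; the
        # defaults are platform dependent): np.array(2).dtype is int64,
        # np.array(2**63).dtype is uint64, and np.array(2**64).dtype falls
        # back to object.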
expected_dtype = np.result_type(np.array(val).dtype, np.array(0).dtype)
assert np.result_type(val, 0) == expected_dtype
# For completeness sake, also check with a NumPy scalar as second arg:
assert np.result_type(val, np.int8(0)) == expected_dtype
@pytest.mark.parametrize(["other", "expected"],
[(1, rational), (1., np.float64)])
def test_float_int_pyscalar_promote_rational(self, other, expected):
        # Note that rationals are a bit awkward as they promote with float64
# or default ints, but not float16 or uint8/int8 (which looks
# inconsistent here)
with pytest.raises(TypeError,
match=r".* do not have a common DType"):
np.result_type(other, rational)
assert np.result_type(other, rational(1, 2)) == expected
@pytest.mark.parametrize(["dtypes", "expected"], [
# These promotions are not associative/commutative:
([np.uint16, np.int16, np.float16], np.float32),
([np.uint16, np.int8, np.float16], np.float32),
([np.uint8, np.int16, np.float16], np.float32),
# The following promotions are not ambiguous, but cover code
# paths of abstract promotion (no particular logic being tested)
([1, 1, np.float64], np.float64),
([1, 1., np.complex128], np.complex128),
([1, 1j, np.float64], np.complex128),
([1., 1., np.int64], np.float64),
([1., 1j, np.float64], np.complex128),
([1j, 1j, np.float64], np.complex128),
([1, True, np.bool_], np.int_),
])
def test_permutations_do_not_influence_result(self, dtypes, expected):
# Tests that most permutations do not influence the result. In the
        # above some uint and int combinations promote to a larger integer
# type, which would then promote to a larger than necessary float.
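        # e.g. promoting pairwise left-to-right: uint16 + int16 -> int32,
        # then int32 + float16 -> float64, which is larger than the float32
        # obtained when all three are considered together.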
for perm in permutations(dtypes):
assert np.result_type(*perm) == expected
def test_rational_dtype():
# test for bug gh-5719
a = np.array([1111], dtype=rational).astype
assert_raises(OverflowError, a, 'int8')
# test that dtype detection finds user-defined types
x = rational(1)
assert_equal(np.array([x,x]).dtype, np.dtype(rational))
def test_dtypes_are_true():
# test for gh-6294
assert bool(np.dtype('f8'))
assert bool(np.dtype('i8'))
assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))
def test_invalid_dtype_string():
# test for gh-10440
assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
assert_raises(TypeError, np.dtype, u'Fl\xfcgel')
def test_keyword_argument():
# test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
def test_ulong_dtype():
# test for gh-21063
assert np.dtype("ulong") == np.dtype(np.uint)
class TestFromDTypeAttribute:
def test_simple(self):
class dt:
dtype = np.dtype("f8")
assert np.dtype(dt) == np.float64
assert np.dtype(dt()) == np.float64
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_recursion(self):
class dt:
pass
dt.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt)
dt_instance = dt()
dt_instance.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt_instance)
def test_void_subtype(self):
class dt(np.void):
            # This code path was fully untested before, so it is unclear
# what this should be useful for. Note that if np.void is used
# numpy will think we are deallocating a base type [1.17, 2019-02].
dtype = np.dtype("f,f")
np.dtype(dt)
np.dtype(dt(1))
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_void_subtype_recursion(self):
class vdt(np.void):
pass
vdt.dtype = vdt
with pytest.raises(RecursionError):
np.dtype(vdt)
with pytest.raises(RecursionError):
np.dtype(vdt(1))
class TestDTypeClasses:
@pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational])
def test_basic_dtypes_subclass_properties(self, dtype):
# Note: Except for the isinstance and type checks, these attributes
        # are currently considered private and may change.
dtype = np.dtype(dtype)
assert isinstance(dtype, np.dtype)
assert type(dtype) is not np.dtype
assert type(dtype).__name__ == f"dtype[{dtype.type.__name__}]"
assert type(dtype).__module__ == "numpy"
assert not type(dtype)._abstract
# the flexible dtypes and datetime/timedelta have additional parameters
# which are more than just storage information, these would need to be
# given when creating a dtype:
parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)
if dtype.type not in parametric:
assert not type(dtype)._parametric
assert type(dtype)() is dtype
else:
assert type(dtype)._parametric
with assert_raises(TypeError):
type(dtype)()
def test_dtype_superclass(self):
assert type(np.dtype) is not type
assert isinstance(np.dtype, type)
assert type(np.dtype).__name__ == "_DTypeMeta"
assert type(np.dtype).__module__ == "numpy"
assert np.dtype._abstract
class TestFromCTypes:
@staticmethod
def check(ctype, dtype):
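        # Both the ctypes *type* and an *instance* of it should map to the
        # same numpy dtype: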
dtype = np.dtype(dtype)
assert_equal(np.dtype(ctype), dtype)
assert_equal(np.dtype(ctype()), dtype)
def test_array(self):
c8 = ctypes.c_uint8
self.check( 3 * c8, (np.uint8, (3,)))
self.check( 1 * c8, (np.uint8, (1,)))
self.check( 0 * c8, (np.uint8, (0,)))
self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
def test_padded_structure(self):
class PaddedStruct(ctypes.Structure):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', np.uint8),
('b', np.uint16)
], align=True)
self.check(PaddedStruct, expected)
def test_bit_fields(self):
class BitfieldStruct(ctypes.Structure):
_fields_ = [
('a', ctypes.c_uint8, 7),
('b', ctypes.c_uint8, 1)
]
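        # numpy structured dtypes have no notion of sub-byte bit fields, so
        # both the type and an instance of it are rejected: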
assert_raises(TypeError, np.dtype, BitfieldStruct)
assert_raises(TypeError, np.dtype, BitfieldStruct())
def test_pointer(self):
p_uint8 = ctypes.POINTER(ctypes.c_uint8)
assert_raises(TypeError, np.dtype, p_uint8)
def test_void_pointer(self):
self.check(ctypes.c_void_p, np.uintp)
def test_union(self):
class Union(ctypes.Union):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
]
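        # In a union all members overlap at offset 0, so the expected dtype
        # uses identical offsets and the itemsize of the largest member: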
expected = np.dtype(dict(
names=['a', 'b'],
formats=[np.uint8, np.uint16],
offsets=[0, 0],
itemsize=2
))
self.check(Union, expected)
def test_union_with_struct_packed(self):
class Struct(ctypes.Structure):
_pack_ = 1
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
class Union(ctypes.Union):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
('c', ctypes.c_uint32),
('d', Struct),
]
expected = np.dtype(dict(
names=['a', 'b', 'c', 'd'],
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
offsets=[0, 0, 0, 0],
itemsize=ctypes.sizeof(Union)
))
self.check(Union, expected)
def test_union_packed(self):
class Struct(ctypes.Structure):
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
_pack_ = 1
class Union(ctypes.Union):
_pack_ = 1
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
('c', ctypes.c_uint32),
('d', Struct),
]
expected = np.dtype(dict(
names=['a', 'b', 'c', 'd'],
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
offsets=[0, 0, 0, 0],
itemsize=ctypes.sizeof(Union)
))
self.check(Union, expected)
def test_packed_structure(self):
class PackedStructure(ctypes.Structure):
_pack_ = 1
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', np.uint8),
('b', np.uint16)
])
self.check(PackedStructure, expected)
def test_large_packed_structure(self):
class PackedStructure(ctypes.Structure):
_pack_ = 2
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
('c', ctypes.c_uint8),
('d', ctypes.c_uint16),
('e', ctypes.c_uint32),
('f', ctypes.c_uint32),
('g', ctypes.c_uint8)
]
expected = np.dtype(dict(
            formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8],
offsets=[0, 2, 4, 6, 8, 12, 16],
names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
itemsize=18))
self.check(PackedStructure, expected)
def test_big_endian_structure_packed(self):
class BigEndStruct(ctypes.BigEndianStructure):
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
_pack_ = 1
expected = np.dtype([('one', 'u1'), ('two', '>u4')])
self.check(BigEndStruct, expected)
def test_little_endian_structure_packed(self):
class LittleEndStruct(ctypes.LittleEndianStructure):
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
_pack_ = 1
expected = np.dtype([('one', 'u1'), ('two', '<u4')])
self.check(LittleEndStruct, expected)
def test_little_endian_structure(self):
class PaddedStruct(ctypes.LittleEndianStructure):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', '<B'),
('b', '<H')
], align=True)
self.check(PaddedStruct, expected)
def test_big_endian_structure(self):
class PaddedStruct(ctypes.BigEndianStructure):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', '>B'),
('b', '>H')
], align=True)
self.check(PaddedStruct, expected)
def test_simple_endian_types(self):
self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
all_types = set(np.typecodes['All'])
all_pairs = permutations(all_types, 2)
@pytest.mark.parametrize("pair", all_pairs)
def test_pairs(self, pair):
"""
Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
"""
# gh-5645: check that np.dtype('i,L') can be used
pair_type = np.dtype('{},{}'.format(*pair))
expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
assert_equal(pair_type, expected)
class TestUserDType:
@pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
def test_custom_structured_dtype(self):
class mytype:
pass
blueprint = np.dtype([("field", object)])
dt = create_custom_field_dtype(blueprint, mytype, 0)
assert dt.type == mytype
# We cannot (currently) *create* this dtype with `np.dtype` because
# mytype does not inherit from `np.generic`. This seems like an
# unnecessary restriction, but one that has been around forever:
assert np.dtype(mytype) == np.dtype("O")
def test_custom_structured_dtype_errors(self):
class mytype:
pass
blueprint = np.dtype([("field", object)])
with pytest.raises(ValueError):
# Tests what happens if fields are unset during creation
# which is currently rejected due to the containing object
# (see PyArray_RegisterDataType).
create_custom_field_dtype(blueprint, mytype, 1)
with pytest.raises(RuntimeError):
# Tests that a dtype must have its type field set up to np.dtype
# or in this case a builtin instance.
create_custom_field_dtype(blueprint, mytype, 2)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
class TestClassGetItem:
def test_dtype(self) -> None:
alias = np.dtype[Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is np.dtype
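        # i.e. subscription produces a PEP 585-style generic alias, just as
        # list[int] does for the builtin container types.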
@pytest.mark.parametrize("code", np.typecodes["All"])
def test_dtype_subclass(self, code: str) -> None:
cls = type(np.dtype(code))
alias = cls[Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is cls
@pytest.mark.parametrize("arg_len", range(4))
def test_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len == 1:
assert np.dtype[arg_tup]
else:
with pytest.raises(TypeError):
np.dtype[arg_tup]
def test_subscript_scalar(self) -> None:
assert np.dtype[Any]
def test_result_type_integers_and_unitless_timedelta64():
# Regression test for gh-20077. The following call of `result_type`
# would cause a seg. fault.
td = np.timedelta64(4)
result = np.result_type(0, td)
assert_dtype_equal(result, td.dtype)
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
def test_class_getitem_38() -> None:
match = "Type subscription requires python >= 3.9"
with pytest.raises(TypeError, match=match):
np.dtype[Any]
| 70,825 | Python | 38.545505 | 96 | 0.518532 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_numerictypes.py | import sys
import itertools
import pytest
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY
# This is the structure of the table used for plain objects:
#
# +-+-+-+
# |x|y|z|
# +-+-+-+
# Structure of a plain array description:
Pdescr = [
('x', 'i4', (2,)),
('y', 'f8', (2, 2)),
('z', 'u1')]
# A plain list of tuples with values for testing:
PbufferT = [
# x y z
([3, 2], [[6., 4.], [6., 4.]], 8),
([4, 3], [[7., 5.], [7., 5.]], 9),
]
# This is the structure of the table used for nested objects (DON'T PANIC!):
#
# +-+---------------------------------+-----+----------+-+-+
# |x|Info |color|info |y|z|
# | +-----+--+----------------+----+--+ +----+-----+ | |
# | |value|y2|Info2 |name|z2| |Name|Value| | |
# | | | +----+-----+--+--+ | | | | | | |
# | | | |name|value|y3|z3| | | | | | | |
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
#
# The corresponding nested array description:
Ndescr = [
('x', 'i4', (2,)),
('Info', [
('value', 'c16'),
('y2', 'f8'),
('Info2', [
('name', 'S2'),
('value', 'c16', (2,)),
('y3', 'f8', (2,)),
('z3', 'u4', (2,))]),
('name', 'S2'),
('z2', 'b1')]),
('color', 'S2'),
('info', [
('Name', 'U8'),
('Value', 'c16')]),
('y', 'f8', (2, 2)),
('z', 'u1')]
NbufferT = [
# x Info color info y z
# value y2 Info2 name z2 Name Value
# name value y3 z3
([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', (u'NN', 6j), [[6., 4.], [6., 4.]], 8),
([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), b'dd', (u'OO', 7j), [[7., 5.], [7., 5.]], 9),
]
byteorder = {'little':'<', 'big':'>'}[sys.byteorder]
def normalize_descr(descr):
"Normalize a description adding the platform byteorder."
out = []
for item in descr:
dtype = item[1]
if isinstance(dtype, str):
if dtype[0] not in ['|', '<', '>']:
onebyte = dtype[1:] == "1"
if onebyte or dtype[0] in ['S', 'V', 'b']:
dtype = "|" + dtype
else:
dtype = byteorder + dtype
if len(item) > 2 and np.prod(item[2]) > 1:
nitem = (item[0], dtype, item[2])
else:
nitem = (item[0], dtype)
out.append(nitem)
elif isinstance(dtype, list):
l = normalize_descr(dtype)
out.append((item[0], l))
else:
            raise ValueError("Expected a str or list and got %s" %
                             type(dtype))
return out
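# An illustrative example of the helper above (assuming a little-endian
# machine, i.e. byteorder == '<'):
#   normalize_descr([('x', 'i4', (2,)), ('z', 'u1')])
#   -> [('x', '<i4', (2,)), ('z', '|u1')]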
############################################################
# Creation tests
############################################################
class CreateZeros:
"""Check the creation of heterogeneous arrays zero-valued"""
def test_zeros0D(self):
"""Check creation of 0-dimensional objects"""
h = np.zeros((), dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
assert_(h.dtype.fields['x'][0].name[:4] == 'void')
assert_(h.dtype.fields['x'][0].char == 'V')
assert_(h.dtype.fields['x'][0].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((), dtype='u1'))
def test_zerosSD(self):
"""Check creation of single-dimensional objects"""
h = np.zeros((2,), dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
assert_(h.dtype['y'].name[:4] == 'void')
assert_(h.dtype['y'].char == 'V')
assert_(h.dtype['y'].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2,), dtype='u1'))
def test_zerosMD(self):
"""Check creation of multi-dimensional objects"""
h = np.zeros((2, 3), dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
assert_(h.dtype['z'].name == 'uint8')
assert_(h.dtype['z'].char == 'B')
assert_(h.dtype['z'].type == np.uint8)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2, 3), dtype='u1'))
class TestCreateZerosPlain(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (plain)"""
_descr = Pdescr
class TestCreateZerosNested(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (nested)"""
_descr = Ndescr
class CreateValues:
"""Check the creation of heterogeneous arrays with values"""
def test_tuple(self):
"""Check creation from tuples"""
h = np.array(self._buffer, dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
assert_(h.shape == (2,))
else:
assert_(h.shape == ())
def test_list_of_tuple(self):
"""Check creation from list of tuples"""
h = np.array([self._buffer], dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
assert_(h.shape == (1, 2))
else:
assert_(h.shape == (1,))
def test_list_of_list_of_tuple(self):
"""Check creation from list of list of tuples"""
h = np.array([[self._buffer]], dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
assert_(h.shape == (1, 1, 2))
else:
assert_(h.shape == (1, 1))
class TestCreateValuesPlainSingle(CreateValues):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
class TestCreateValuesPlainMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
class TestCreateValuesNestedSingle(CreateValues):
"""Check the creation of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = 0
_buffer = NbufferT[0]
class TestCreateValuesNestedMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = 1
_buffer = NbufferT
############################################################
# Reading tests
############################################################
class ReadValuesPlain:
"""Check the reading of values in heterogeneous arrays (plain)"""
def test_access_fields(self):
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
else:
assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][1],
self._buffer[1][1]], dtype='f8'))
assert_equal(h['z'], np.array([self._buffer[0][2],
self._buffer[1][2]], dtype='u1'))
class TestReadValuesPlainSingle(ReadValuesPlain):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
class TestReadValuesPlainMultiple(ReadValuesPlain):
"""Check the values of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
class ReadValuesNested:
"""Check the reading of values in heterogeneous arrays (nested)"""
def test_access_top_fields(self):
"""Check reading the top fields of a nested array"""
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
else:
assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][4],
self._buffer[1][4]], dtype='f8'))
assert_equal(h['z'], np.array([self._buffer[0][5],
self._buffer[1][5]], dtype='u1'))
    def test_nested1_accessors(self):
"""Check reading the nested fields of a nested array (1st level)"""
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
assert_equal(h['Info']['value'],
np.array(self._buffer[1][0], dtype='c16'))
assert_equal(h['Info']['y2'],
np.array(self._buffer[1][1], dtype='f8'))
assert_equal(h['info']['Name'],
np.array(self._buffer[3][0], dtype='U2'))
assert_equal(h['info']['Value'],
np.array(self._buffer[3][1], dtype='c16'))
else:
assert_equal(h['Info']['value'],
np.array([self._buffer[0][1][0],
self._buffer[1][1][0]],
dtype='c16'))
assert_equal(h['Info']['y2'],
np.array([self._buffer[0][1][1],
self._buffer[1][1][1]],
dtype='f8'))
assert_equal(h['info']['Name'],
np.array([self._buffer[0][3][0],
self._buffer[1][3][0]],
dtype='U2'))
assert_equal(h['info']['Value'],
np.array([self._buffer[0][3][1],
self._buffer[1][3][1]],
dtype='c16'))
    def test_nested2_accessors(self):
"""Check reading the nested fields of a nested array (2nd level)"""
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
assert_equal(h['Info']['Info2']['value'],
np.array(self._buffer[1][2][1], dtype='c16'))
assert_equal(h['Info']['Info2']['z3'],
np.array(self._buffer[1][2][3], dtype='u4'))
else:
assert_equal(h['Info']['Info2']['value'],
np.array([self._buffer[0][1][2][1],
self._buffer[1][1][2][1]],
dtype='c16'))
assert_equal(h['Info']['Info2']['z3'],
np.array([self._buffer[0][1][2][3],
self._buffer[1][1][2][3]],
dtype='u4'))
def test_nested1_descriptor(self):
"""Check access nested descriptors of a nested array (1st level)"""
h = np.array(self._buffer, dtype=self._descr)
assert_(h.dtype['Info']['value'].name == 'complex128')
assert_(h.dtype['Info']['y2'].name == 'float64')
assert_(h.dtype['info']['Name'].name == 'str256')
assert_(h.dtype['info']['Value'].name == 'complex128')
def test_nested2_descriptor(self):
"""Check access nested descriptors of a nested array (2nd level)"""
h = np.array(self._buffer, dtype=self._descr)
assert_(h.dtype['Info']['Info2']['value'].name == 'void256')
assert_(h.dtype['Info']['Info2']['z3'].name == 'void64')
class TestReadValuesNestedSingle(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = False
_buffer = NbufferT[0]
class TestReadValuesNestedMultiple(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = True
_buffer = NbufferT
class TestEmptyField:
def test_assign(self):
a = np.arange(10, dtype=np.float32)
a.dtype = [("int", "<0i4"), ("float", "<2f4")]
assert_(a['int'].shape == (5, 0))
assert_(a['float'].shape == (5, 2))
class TestCommonType:
def test_scalar_loses1(self):
res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
assert_(res == 'f4')
def test_scalar_loses2(self):
res = np.find_common_type(['f4', 'f4'], ['i8'])
assert_(res == 'f4')
def test_scalar_wins(self):
res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
assert_(res == 'c8')
def test_scalar_wins2(self):
res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
assert_(res == 'f8')
def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose
res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
assert_(res == 'f8')
class TestMultipleFields:
def setup_method(self):
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
def _bad_call(self):
return self.ary['f0', 'f1']
def test_no_tuple(self):
assert_raises(IndexError, self._bad_call)
def test_return(self):
res = self.ary[['f0', 'f2']].tolist()
assert_(res == [(1, 3), (5, 7)])
class TestIsSubDType:
# scalar types can be promoted into dtypes
wrappers = [np.dtype, lambda x: x]
def test_both_abstract(self):
assert_(np.issubdtype(np.floating, np.inexact))
assert_(not np.issubdtype(np.inexact, np.floating))
def test_same(self):
for cls in (np.float32, np.int32):
for w1, w2 in itertools.product(self.wrappers, repeat=2):
assert_(np.issubdtype(w1(cls), w2(cls)))
def test_subclass(self):
# note we cannot promote floating to a dtype, as it would turn into a
# concrete type
for w in self.wrappers:
assert_(np.issubdtype(w(np.float32), np.floating))
assert_(np.issubdtype(w(np.float64), np.floating))
def test_subclass_backwards(self):
for w in self.wrappers:
assert_(not np.issubdtype(np.floating, w(np.float32)))
assert_(not np.issubdtype(np.floating, w(np.float64)))
def test_sibling_class(self):
for w1, w2 in itertools.product(self.wrappers, repeat=2):
assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
def test_nondtype_nonscalartype(self):
# See gh-14619 and gh-9505 which introduced the deprecation to fix
# this. These tests are directly taken from gh-9505
assert not np.issubdtype(np.float32, 'float64')
assert not np.issubdtype(np.float32, 'f8')
assert not np.issubdtype(np.int32, str)
assert not np.issubdtype(np.int32, 'int64')
assert not np.issubdtype(np.str_, 'void')
# for the following the correct spellings are
# np.integer, np.floating, or np.complexfloating respectively:
assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_
assert not np.issubdtype(np.float32, float)
assert not np.issubdtype(np.complex64, complex)
assert not np.issubdtype(np.float32, "float")
assert not np.issubdtype(np.float64, "f")
# Test the same for the correct first datatype and abstract one
# in the case of int, float, complex:
assert np.issubdtype(np.float64, 'float64')
assert np.issubdtype(np.float64, 'f8')
assert np.issubdtype(np.str_, str)
assert np.issubdtype(np.int64, 'int64')
assert np.issubdtype(np.void, 'void')
assert np.issubdtype(np.int8, np.integer)
assert np.issubdtype(np.float32, np.floating)
assert np.issubdtype(np.complex64, np.complexfloating)
assert np.issubdtype(np.float64, "float")
assert np.issubdtype(np.float32, "f")
class TestSctypeDict:
def test_longdouble(self):
assert_(np.sctypeDict['f8'] is not np.longdouble)
assert_(np.sctypeDict['c16'] is not np.clongdouble)
def test_ulong(self):
# Test that 'ulong' behaves like 'long'. np.sctypeDict['long'] is an
# alias for np.int_, but np.long is not supported for historical
# reasons (gh-21063)
assert_(np.sctypeDict['ulong'] is np.uint)
assert_(not hasattr(np, 'ulong'))
class TestBitName:
def test_abstract(self):
assert_raises(ValueError, np.core.numerictypes.bitname, np.floating)
class TestMaximumSctype:
# note that parametrizing with sctype['int'] and similar would skip types
# with the same size (gh-11923)
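    # e.g. np.maximum_sctype(np.int8) is np.int64 on typical platforms,
    # since np.sctypes['int'] usually ends with int64.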
@pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong])
def test_int(self, t):
assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1])
@pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong])
def test_uint(self, t):
assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1])
@pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble])
def test_float(self, t):
assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1])
@pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble])
def test_complex(self, t):
assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1])
@pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void])
def test_other(self, t):
assert_equal(np.maximum_sctype(t), t)
class Test_sctype2char:
# This function is old enough that we're really just documenting the quirks
# at this point.
def test_scalar_type(self):
assert_equal(np.sctype2char(np.double), 'd')
assert_equal(np.sctype2char(np.int_), 'l')
assert_equal(np.sctype2char(np.unicode_), 'U')
assert_equal(np.sctype2char(np.bytes_), 'S')
def test_other_type(self):
assert_equal(np.sctype2char(float), 'd')
assert_equal(np.sctype2char(list), 'O')
assert_equal(np.sctype2char(np.ndarray), 'O')
def test_third_party_scalar_type(self):
from numpy.core._rational_tests import rational
assert_raises(KeyError, np.sctype2char, rational)
assert_raises(KeyError, np.sctype2char, rational(1))
def test_array_instance(self):
assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
def test_abstract_type(self):
assert_raises(KeyError, np.sctype2char, np.floating)
def test_non_type(self):
assert_raises(ValueError, np.sctype2char, 1)
@pytest.mark.parametrize("rep, expected", [
(np.int32, True),
(list, False),
(1.1, False),
(str, True),
(np.dtype(np.float64), True),
(np.dtype((np.int16, (3, 4))), True),
(np.dtype([('a', np.int8)]), True),
])
def test_issctype(rep, expected):
# ensure proper identification of scalar
# data-types by issctype()
actual = np.issctype(rep)
assert_equal(actual, expected)
@pytest.mark.skipif(sys.flags.optimize > 1,
reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
class TestDocStrings:
def test_platform_dependent_aliases(self):
if np.int64 is np.int_:
assert_('int64' in np.int_.__doc__)
elif np.int64 is np.longlong:
assert_('int64' in np.longlong.__doc__)
class TestScalarTypeNames:
# gh-9799
numeric_types = [
np.byte, np.short, np.intc, np.int_, np.longlong,
np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong,
np.half, np.single, np.double, np.longdouble,
np.csingle, np.cdouble, np.clongdouble,
]
def test_names_are_unique(self):
# none of the above may be aliases for each other
assert len(set(self.numeric_types)) == len(self.numeric_types)
# names must be unique
names = [t.__name__ for t in self.numeric_types]
assert len(set(names)) == len(names)
@pytest.mark.parametrize('t', numeric_types)
def test_names_reflect_attributes(self, t):
""" Test that names correspond to where the type is under ``np.`` """
assert getattr(np, t.__name__) is t
@pytest.mark.parametrize('t', numeric_types)
    def test_names_are_understood_by_dtype(self, t):
""" Test the dtype constructor maps names back to the type """
assert np.dtype(t.__name__).type is t
| 21,152 | Python | 36.63879 | 119 | 0.536781 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_array_coercion.py | """
Tests for array coercion, mainly through testing `np.array` results directly.
Note that other such tests exist e.g. in `test_api.py` and many corner-cases
are tested (sometimes indirectly) elsewhere.
"""
import pytest
from pytest import param
from itertools import product
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_umath import _discover_array_parameters
from numpy.testing import (
assert_array_equal, assert_warns, IS_PYPY)
def arraylikes():
"""
    Generator for functions converting an array into various array-likes.
"""
# base array:
def ndarray(a):
return a
yield param(ndarray, id="ndarray")
# subclass:
class MyArr(np.ndarray):
pass
def subclass(a):
return a.view(MyArr)
yield subclass
class _SequenceLike():
        # We are giving a warning that array-likes were also expected to be
        # sequence-like in `np.array([array_like])`; this can be removed
        # when the deprecation expires (started in NumPy 1.20)
def __len__(self):
raise TypeError
def __getitem__(self):
raise TypeError
# Array-interface
class ArrayDunder(_SequenceLike):
def __init__(self, a):
self.a = a
def __array__(self, dtype=None):
return self.a
yield param(ArrayDunder, id="__array__")
# memory-view
yield param(memoryview, id="memoryview")
# Array-interface
class ArrayInterface(_SequenceLike):
def __init__(self, a):
self.a = a # need to hold on to keep interface valid
self.__array_interface__ = a.__array_interface__
yield param(ArrayInterface, id="__array_interface__")
# Array-Struct
class ArrayStruct(_SequenceLike):
def __init__(self, a):
self.a = a # need to hold on to keep struct valid
self.__array_struct__ = a.__array_struct__
yield param(ArrayStruct, id="__array_struct__")
def scalar_instances(times=True, extended_precision=True, user_dtype=True):
# Hard-coded list of scalar instances.
# Floats:
yield param(np.sqrt(np.float16(5)), id="float16")
yield param(np.sqrt(np.float32(5)), id="float32")
yield param(np.sqrt(np.float64(5)), id="float64")
if extended_precision:
yield param(np.sqrt(np.longdouble(5)), id="longdouble")
# Complex:
yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
if extended_precision:
yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")
# Bool:
# XFAIL: Bool should be added, but has some bad properties when it
# comes to strings, see also gh-9875
# yield param(np.bool_(0), id="bool")
# Integers:
yield param(np.int8(2), id="int8")
yield param(np.int16(2), id="int16")
yield param(np.int32(2), id="int32")
yield param(np.int64(2), id="int64")
yield param(np.uint8(2), id="uint8")
yield param(np.uint16(2), id="uint16")
yield param(np.uint32(2), id="uint32")
yield param(np.uint64(2), id="uint64")
# Rational:
if user_dtype:
yield param(rational(1, 2), id="rational")
# Cannot create a structured void scalar directly:
structured = np.array([(1, 3)], "i,i")[0]
assert isinstance(structured, np.void)
assert structured.dtype == np.dtype("i,i")
yield param(structured, id="structured")
if times:
# Datetimes and timedelta
yield param(np.timedelta64(2), id="timedelta64[generic]")
yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")
yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")
# Strings and unstructured void:
yield param(np.bytes_(b"1234"), id="bytes")
yield param(np.unicode_("2345"), id="unicode")
yield param(np.void(b"4321"), id="unstructured_void")
def is_parametric_dtype(dtype):
"""Returns True if the the dtype is a parametric legacy dtype (itemsize
is 0, or a datetime without units)
"""
if dtype.itemsize == 0:
return True
if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
if dtype.name.endswith("64"):
# Generic time units
return True
return False
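# An illustrative sketch of the helper above:
#   is_parametric_dtype(np.dtype("S"))     -> True  (itemsize 0)
#   is_parametric_dtype(np.dtype("M8"))    -> True  (generic time unit)
#   is_parametric_dtype(np.dtype("M8[s]")) -> False (unit is fixed)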
class TestStringDiscovery:
@pytest.mark.parametrize("obj",
[object(), 1.2, 10**43, None, "string"],
ids=["object", "1.2", "10**43", "None", "string"])
def test_basic_stringlength(self, obj):
length = len(str(obj))
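        # e.g. str(1.2) == "1.2" (length 3), so discovery yields "S3"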
expected = np.dtype(f"S{length}")
assert np.array(obj, dtype="S").dtype == expected
assert np.array([obj], dtype="S").dtype == expected
# A nested array is also discovered correctly
arr = np.array(obj, dtype="O")
assert np.array(arr, dtype="S").dtype == expected
# Check that .astype() behaves identical
assert arr.astype("S").dtype == expected
@pytest.mark.parametrize("obj",
[object(), 1.2, 10**43, None, "string"],
ids=["object", "1.2", "10**43", "None", "string"])
def test_nested_arrays_stringlength(self, obj):
length = len(str(obj))
expected = np.dtype(f"S{length}")
arr = np.array(obj, dtype="O")
assert np.array([arr, arr], dtype="S").dtype == expected
@pytest.mark.parametrize("arraylike", arraylikes())
def test_unpack_first_level(self, arraylike):
# We unpack exactly one level of array likes
obj = np.array([None])
obj[0] = np.array(1.2)
# the length of the included item, not of the float dtype
length = len(str(obj[0]))
expected = np.dtype(f"S{length}")
obj = arraylike(obj)
# casting to string usually calls str(obj)
arr = np.array([obj], dtype="S")
assert arr.shape == (1, 1)
assert arr.dtype == expected
class TestScalarDiscovery:
def test_void_special_case(self):
# Void dtypes with structures discover tuples as elements
arr = np.array((1, 2, 3), dtype="i,i,i")
assert arr.shape == ()
arr = np.array([(1, 2, 3)], dtype="i,i,i")
assert arr.shape == (1,)
def test_char_special_case(self):
arr = np.array("string", dtype="c")
assert arr.shape == (6,)
assert arr.dtype.char == "c"
arr = np.array(["string"], dtype="c")
assert arr.shape == (1, 6)
assert arr.dtype.char == "c"
def test_char_special_case_deep(self):
# Check that the character special case errors correctly if the
# array is too deep:
nested = ["string"] # 2 dimensions (due to string being sequence)
for i in range(np.MAXDIMS - 2):
nested = [nested]
arr = np.array(nested, dtype='c')
assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
with pytest.raises(ValueError):
np.array([nested], dtype="c")
def test_unknown_object(self):
arr = np.array(object())
assert arr.shape == ()
assert arr.dtype == np.dtype("O")
@pytest.mark.parametrize("scalar", scalar_instances())
def test_scalar(self, scalar):
arr = np.array(scalar)
assert arr.shape == ()
assert arr.dtype == scalar.dtype
arr = np.array([[scalar, scalar]])
assert arr.shape == (1, 2)
assert arr.dtype == scalar.dtype
# Additionally to string this test also runs into a corner case
# with datetime promotion (the difference is the promotion order).
@pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
def test_scalar_promotion(self):
for sc1, sc2 in product(scalar_instances(), scalar_instances()):
sc1, sc2 = sc1.values[0], sc2.values[0]
# test all combinations:
try:
arr = np.array([sc1, sc2])
except (TypeError, ValueError):
# The promotion between two times can fail
# XFAIL (ValueError): Some object casts are currently undefined
continue
assert arr.shape == (2,)
try:
dt1, dt2 = sc1.dtype, sc2.dtype
expected_dtype = np.promote_types(dt1, dt2)
assert arr.dtype == expected_dtype
            except TypeError:
# Will currently always go to object dtype
assert arr.dtype == np.dtype("O")
@pytest.mark.parametrize("scalar", scalar_instances())
def test_scalar_coercion(self, scalar):
# This tests various scalar coercion paths, mainly for the numerical
# types. It includes some paths not directly related to `np.array`
if isinstance(scalar, np.inexact):
# Ensure we have a full-precision number if available
scalar = type(scalar)((scalar * 2)**0.5)
if type(scalar) is rational:
# Rational generally fails due to a missing cast. In the future
# object casts should automatically be defined based on `setitem`.
pytest.xfail("Rational to object cast is undefined currently.")
# Use casting from object:
arr = np.array(scalar, dtype=object).astype(scalar.dtype)
# Test various ways to create an array containing this scalar:
arr1 = np.array(scalar).reshape(1)
arr2 = np.array([scalar])
arr3 = np.empty(1, dtype=scalar.dtype)
arr3[0] = scalar
arr4 = np.empty(1, dtype=scalar.dtype)
arr4[:] = [scalar]
# All of these methods should yield the same results
assert_array_equal(arr, arr1)
assert_array_equal(arr, arr2)
assert_array_equal(arr, arr3)
assert_array_equal(arr, arr4)
@pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
@pytest.mark.parametrize("cast_to", scalar_instances())
def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
"""
Test that in most cases:
* `np.array(scalar, dtype=dtype)`
* `np.empty((), dtype=dtype)[()] = scalar`
* `np.array(scalar).astype(dtype)`
        should behave the same. The only exceptions are parametric dtypes
(mainly datetime/timedelta without unit) and void without fields.
"""
dtype = cast_to.dtype # use to parametrize only the target dtype
for scalar in scalar_instances(times=False):
scalar = scalar.values[0]
if dtype.type == np.void:
if scalar.dtype.fields is not None and dtype.fields is None:
# Here, coercion to "V6" works, but the cast fails.
# Since the types are identical, SETITEM takes care of
# this, but has different rules than the cast.
with pytest.raises(TypeError):
np.array(scalar).astype(dtype)
np.array(scalar, dtype=dtype)
np.array([scalar], dtype=dtype)
continue
# The main test, we first try to use casting and if it succeeds
# continue below testing that things are the same, otherwise
# test that the alternative paths at least also fail.
try:
cast = np.array(scalar).astype(dtype)
except (TypeError, ValueError, RuntimeError):
# coercion should also raise (error type may change)
with pytest.raises(Exception):
np.array(scalar, dtype=dtype)
if (isinstance(scalar, rational) and
np.issubdtype(dtype, np.signedinteger)):
return
with pytest.raises(Exception):
np.array([scalar], dtype=dtype)
# assignment should also raise
res = np.zeros((), dtype=dtype)
with pytest.raises(Exception):
res[()] = scalar
return
# Non error path:
arr = np.array(scalar, dtype=dtype)
assert_array_equal(arr, cast)
# assignment behaves the same
ass = np.zeros((), dtype=dtype)
ass[()] = scalar
assert_array_equal(ass, cast)
@pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
def test_pyscalar_subclasses(self, pyscalar):
"""NumPy arrays are read/write which means that anything but invariant
behaviour is on thin ice. However, we currently are happy to discover
subclasses of Python float, int, complex the same as the base classes.
This should potentially be deprecated.
"""
class MyScalar(type(pyscalar)):
pass
res = np.array(MyScalar(pyscalar))
expected = np.array(pyscalar)
assert_array_equal(res, expected)
@pytest.mark.parametrize("dtype_char", np.typecodes["All"])
def test_default_dtype_instance(self, dtype_char):
if dtype_char in "SU":
dtype = np.dtype(dtype_char + "1")
elif dtype_char == "V":
# Legacy behaviour was to use V8. The reason was float64 being the
# default dtype and that having 8 bytes.
dtype = np.dtype("V8")
else:
dtype = np.dtype(dtype_char)
discovered_dtype, _ = _discover_array_parameters([], type(dtype))
assert discovered_dtype == dtype
assert discovered_dtype.itemsize == dtype.itemsize
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
def test_scalar_to_int_coerce_does_not_cast(self, dtype):
"""
Signed integers are currently different in that they do not cast other
NumPy scalar, but instead use scalar.__int__(). The hardcoded
exception to this rule is `np.array(scalar, dtype=integer)`.
"""
dtype = np.dtype(dtype)
invalid_int = np.ulonglong(-1)
float_nan = np.float64(np.nan)
for scalar in [float_nan, invalid_int]:
# This is a special case using casting logic and thus not failing:
coerced = np.array(scalar, dtype=dtype)
cast = np.array(scalar).astype(dtype)
assert_array_equal(coerced, cast)
# However these fail:
with pytest.raises((ValueError, OverflowError)):
np.array([scalar], dtype=dtype)
with pytest.raises((ValueError, OverflowError)):
cast[()] = scalar
class TestTimeScalars:
@pytest.mark.parametrize("dtype", [np.int64, np.float32])
@pytest.mark.parametrize("scalar",
[param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
param(np.timedelta64(123, "s"), id="timedelta64[s]"),
param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
param(np.datetime64(1, "D"), id="datetime64[D]")],)
def test_coercion_basic(self, dtype, scalar):
# Note the `[scalar]` is there because np.array(scalar) uses stricter
# `scalar.__int__()` rules for backward compatibility right now.
arr = np.array(scalar, dtype=dtype)
cast = np.array(scalar).astype(dtype)
assert_array_equal(arr, cast)
ass = np.ones((), dtype=dtype)
if issubclass(dtype, np.integer):
with pytest.raises(TypeError):
# raises, as would np.array([scalar], dtype=dtype), this is
# conversion from times, but behaviour of integers.
ass[()] = scalar
else:
ass[()] = scalar
assert_array_equal(ass, cast)
@pytest.mark.parametrize("dtype", [np.int64, np.float32])
@pytest.mark.parametrize("scalar",
[param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
# Only "ns" and "generic" timedeltas can be converted to numbers
# so these are slightly special.
arr = np.array(scalar, dtype=dtype)
cast = np.array(scalar).astype(dtype)
ass = np.ones((), dtype=dtype)
ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype)
assert_array_equal(arr, cast)
assert_array_equal(cast, cast)
@pytest.mark.parametrize("dtype", ["S6", "U6"])
@pytest.mark.parametrize(["val", "unit"],
[param(123, "s", id="[s]"), param(123, "D", id="[D]")])
def test_coercion_assignment_datetime(self, val, unit, dtype):
# String from datetime64 assignment is currently special cased to
# never use casting. This is because casting will error in this
# case, and traditionally in most cases the behaviour is maintained
# like this. (`np.array(scalar, dtype="U6")` would have failed before)
# TODO: This discrepancy _should_ be resolved, either by relaxing the
# cast, or by deprecating the first part.
scalar = np.datetime64(val, unit)
dtype = np.dtype(dtype)
cut_string = dtype.type(str(scalar)[:6])
arr = np.array(scalar, dtype=dtype)
assert arr[()] == cut_string
ass = np.ones((), dtype=dtype)
ass[()] = scalar
assert ass[()] == cut_string
with pytest.raises(RuntimeError):
# However, unlike the above assignment using `str(scalar)[:6]`
# due to being handled by the string DType and not be casting
# the explicit cast fails:
np.array(scalar).astype(dtype)
@pytest.mark.parametrize(["val", "unit"],
[param(123, "s", id="[s]"), param(123, "D", id="[D]")])
def test_coercion_assignment_timedelta(self, val, unit):
scalar = np.timedelta64(val, unit)
# Unlike datetime64, timedelta allows the unsafe cast:
np.array(scalar, dtype="S6")
cast = np.array(scalar).astype("S6")
ass = np.ones((), dtype="S6")
ass[()] = scalar
expected = scalar.astype("S")[:6]
assert cast[()] == expected
assert ass[()] == expected
class TestNested:
def test_nested_simple(self):
initial = [1.2]
nested = initial
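        # Build a list nested np.MAXDIMS (currently 32) levels deep; one
        # more level can no longer be represented as array dimensions.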
for i in range(np.MAXDIMS - 1):
nested = [nested]
arr = np.array(nested, dtype="float64")
assert arr.shape == (1,) * np.MAXDIMS
with pytest.raises(ValueError):
np.array([nested], dtype="float64")
# We discover object automatically at this time:
with assert_warns(np.VisibleDeprecationWarning):
arr = np.array([nested])
assert arr.dtype == np.dtype("O")
assert arr.shape == (1,) * np.MAXDIMS
assert arr.item() is initial
def test_pathological_self_containing(self):
# Test that this also works for two nested sequences
l = []
l.append(l)
arr = np.array([l, l, l], dtype=object)
assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
# Also check a ragged case:
arr = np.array([l, [None], l], dtype=object)
assert arr.shape == (3, 1)
@pytest.mark.parametrize("arraylike", arraylikes())
def test_nested_arraylikes(self, arraylike):
        # We try storing an array-like into an array, but the array-like
        # has too many dimensions.  This means the shape discovery decides
        # that the array-like must be treated as an object (a special case
        # of ragged discovery).  The result is an array with one dimension
        # less than the maximum, into which the array-like is assigned
        # (which works for object dtype, or whenever `float(arraylike)`
        # works).
initial = arraylike(np.ones((1, 1)))
nested = initial
for i in range(np.MAXDIMS - 1):
nested = [nested]
with pytest.warns(DeprecationWarning):
            # It will refuse to assign the array-like into the float64
            # result array:
np.array(nested, dtype="float64")
        # With object dtype, we end up assigning a (1, 1) array into a (1,)
        # result (due to running out of dimensions); this is currently
        # supported, but it is a special case and not ideal.
arr = np.array(nested, dtype=object)
assert arr.shape == (1,) * np.MAXDIMS
assert arr.item() == np.array(initial).item()
@pytest.mark.parametrize("arraylike", arraylikes())
def test_uneven_depth_ragged(self, arraylike):
arr = np.arange(4).reshape((2, 2))
arr = arraylike(arr)
# Array is ragged in the second dimension already:
out = np.array([arr, [arr]], dtype=object)
assert out.shape == (2,)
assert out[0] is arr
assert type(out[1]) is list
# Array is ragged in the third dimension:
with pytest.raises(ValueError):
# This is a broadcast error during assignment, because
# the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
np.array([arr, [arr, arr]], dtype=object)
def test_empty_sequence(self):
arr = np.array([[], [1], [[1]]], dtype=object)
assert arr.shape == (3,)
# The empty sequence stops further dimension discovery, so the
# result shape will be (0,) which leads to an error during:
with pytest.raises(ValueError):
np.array([[], np.empty((0, 1))], dtype=object)
def test_array_of_different_depths(self):
        # When multiple arrays (or array-likes) are included in a
        # sequence and have different depths, we currently discover
# as many dimensions as they share. (see also gh-17224)
arr = np.zeros((3, 2))
mismatch_first_dim = np.zeros((1, 2))
mismatch_second_dim = np.zeros((3, 3))
dtype, shape = _discover_array_parameters(
[arr, mismatch_second_dim], dtype=np.dtype("O"))
assert shape == (2, 3)
dtype, shape = _discover_array_parameters(
[arr, mismatch_first_dim], dtype=np.dtype("O"))
assert shape == (2,)
# The second case is currently supported because the arrays
# can be stored as objects:
res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
assert res[0] is arr
assert res[1] is mismatch_first_dim
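    # Illustrative sketch (added here): when the first axis lengths differ,
    # only the shared outer dimension is discovered and the arrays are
    # stored as objects, mirroring the `mismatch_first_dim` case above.
    def test_shared_depth_sketch(self):
        a = np.zeros((3, 2))
        b = np.zeros((1, 2))
        res = np.asarray([a, b], dtype=object)
        assert res.shape == (2,)
        assert res[0] is a and res[1] is b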
class TestBadSequences:
    # These are tests for bad objects passed into `np.array`; in general
    # these have undefined behaviour.  In the old code they partially
    # worked, whereas now they will fail.  We could (and maybe should)
    # create a copy of all sequences to be safe against bad actors.
def test_growing_list(self):
# List to coerce, `mylist` will append to it during coercion
obj = []
class mylist(list):
def __len__(self):
obj.append([1, 2])
return super().__len__()
obj.append(mylist([1, 2]))
with pytest.raises(RuntimeError):
np.array(obj)
# Note: We do not test a shrinking list. These do very evil things
# and the only way to fix them would be to copy all sequences.
# (which may be a real option in the future).
def test_mutated_list(self):
# List to coerce, `mylist` will mutate the first element
obj = []
class mylist(list):
def __len__(self):
obj[0] = [2, 3] # replace with a different list.
return super().__len__()
obj.append([2, 3])
obj.append(mylist([1, 2]))
with pytest.raises(RuntimeError):
np.array(obj)
def test_replace_0d_array(self):
        # List to coerce; `baditem` will mutate the first element in place
        obj = []
        class baditem:
            def __len__(self):
                obj[0][0] = 2  # replace the cached 0-d array with an int
                raise ValueError("not actually a sequence!")
            def __getitem__(self, item):
                pass
        # Runs into a corner case in the new code: the `array(2)` is
        # cached, so replacing it invalidates the cache.
obj.append([np.array(2), baditem()])
with pytest.raises(RuntimeError):
np.array(obj)
class TestArrayLikes:
@pytest.mark.parametrize("arraylike", arraylikes())
def test_0d_object_special_case(self, arraylike):
arr = np.array(0.)
obj = arraylike(arr)
# A single array-like is always converted:
res = np.array(obj, dtype=object)
assert_array_equal(arr, res)
# But a single 0-D nested array-like never:
res = np.array([obj], dtype=object)
assert res[0] is obj
def test_0d_generic_special_case(self):
class ArraySubclass(np.ndarray):
def __float__(self):
raise TypeError("e.g. quantities raise on this")
arr = np.array(0.)
obj = arr.view(ArraySubclass)
res = np.array(obj)
# The subclass is simply cast:
assert_array_equal(arr, res)
        # If the 0-D array-like is included, __float__ is currently
        # guaranteed to be used.  We may want to change that; quantities
        # and masked arrays partially rely on this behaviour.
with pytest.raises(TypeError):
np.array([obj])
# The same holds for memoryview:
obj = memoryview(arr)
res = np.array(obj)
assert_array_equal(arr, res)
with pytest.raises(ValueError):
# The error type does not matter much here.
np.array([obj])
def test_arraylike_classes(self):
# The classes of array-likes should generally be acceptable to be
# stored inside a numpy (object) array. This tests all of the
# special attributes (since all are checked during coercion).
arr = np.array(np.int64)
assert arr[()] is np.int64
arr = np.array([np.int64])
assert arr[0] is np.int64
# This also works for properties/unbound methods:
class ArrayLike:
@property
def __array_interface__(self):
pass
@property
def __array_struct__(self):
pass
def __array__(self):
pass
arr = np.array(ArrayLike)
assert arr[()] is ArrayLike
arr = np.array([ArrayLike])
assert arr[0] is ArrayLike
@pytest.mark.skipif(
np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
def test_too_large_array_error_paths(self):
"""Test the error paths, including for memory leaks"""
arr = np.array(0, dtype="uint8")
# Guarantees that a contiguous copy won't work:
arr = np.broadcast_to(arr, 2**62)
for i in range(5):
# repeat, to ensure caching cannot have an effect:
with pytest.raises(MemoryError):
np.array(arr)
with pytest.raises(MemoryError):
np.array([arr])
@pytest.mark.parametrize("attribute",
["__array_interface__", "__array__", "__array_struct__"])
@pytest.mark.parametrize("error", [RecursionError, MemoryError])
def test_bad_array_like_attributes(self, attribute, error):
# RecursionError and MemoryError are considered fatal. All errors
# (except AttributeError) should probably be raised in the future,
# but shapely made use of it, so it will require a deprecation.
class BadInterface:
def __getattr__(self, attr):
if attr == attribute:
raise error
                # fall through to the normal lookup failure
                return super().__getattribute__(attr)
with pytest.raises(error):
np.array(BadInterface())
@pytest.mark.parametrize("error", [RecursionError, MemoryError])
def test_bad_array_like_bad_length(self, error):
# RecursionError and MemoryError are considered "critical" in
# sequences. We could expand this more generally though. (NumPy 1.20)
class BadSequence:
def __len__(self):
raise error
            def __getitem__(self, item):
                # must have __getitem__ to look like a sequence
                return 1
with pytest.raises(error):
np.array(BadSequence())
class TestSpecialAttributeLookupFailure:
    # An exception is raised while fetching the special attribute
class WeirdArrayLike:
@property
def __array__(self):
raise RuntimeError("oops!")
class WeirdArrayInterface:
@property
def __array_interface__(self):
raise RuntimeError("oops!")
def test_deprecated(self):
with pytest.raises(RuntimeError):
np.array(self.WeirdArrayLike())
with pytest.raises(RuntimeError):
np.array(self.WeirdArrayInterface())
| 29,075 | Python | 36.859375 | 83 | 0.589888 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_datetime.py |
import numpy
import numpy as np
import datetime
import pytest
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
assert_raises_regex, assert_array_equal,
)
from numpy.compat import pickle
# Use pytz to test out various time zones if available
try:
from pytz import timezone as tz
_has_pytz = True
except ImportError:
_has_pytz = False
try:
RecursionError
except NameError:
RecursionError = RuntimeError # python < 3.5
class TestDateTime:
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'μs', # alias for us
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
dt2 = np.dtype('m8[%s]' % unit)
assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
# Generic units shouldn't add [] to the end
assert_equal(str(np.dtype("M8")), "datetime64")
# Should be possible to specify the endianness
assert_equal(np.dtype("=M8"), np.dtype("M8"))
assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
assert_(np.dtype(">M8") == np.dtype("M8") or
np.dtype("<M8") == np.dtype("M8"))
assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
np.dtype("<M8[D]") == np.dtype("M8[D]"))
assert_(np.dtype(">M8") != np.dtype("<M8"))
assert_equal(np.dtype("=m8"), np.dtype("m8"))
assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
assert_(np.dtype(">m8") == np.dtype("m8") or
np.dtype("<m8") == np.dtype("m8"))
assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
np.dtype("<m8[D]") == np.dtype("m8[D]"))
assert_(np.dtype(">m8") != np.dtype("<m8"))
# Check that the parser rejects bad datetime types
assert_raises(TypeError, np.dtype, 'M8[badunit]')
assert_raises(TypeError, np.dtype, 'm8[badunit]')
assert_raises(TypeError, np.dtype, 'M8[YY]')
assert_raises(TypeError, np.dtype, 'm8[YY]')
assert_raises(TypeError, np.dtype, 'm4')
assert_raises(TypeError, np.dtype, 'M7')
assert_raises(TypeError, np.dtype, 'm7')
assert_raises(TypeError, np.dtype, 'M16')
assert_raises(TypeError, np.dtype, 'm16')
assert_raises(TypeError, np.dtype, 'M8[3000000000ps]')
def test_datetime_casting_rules(self):
# Cannot cast safely/same_kind between timedelta and datetime
assert_(not np.can_cast('m8', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8', 'm8', casting='same_kind'))
assert_(not np.can_cast('m8', 'M8', casting='safe'))
assert_(not np.can_cast('M8', 'm8', casting='safe'))
# Can cast safely/same_kind from integer to timedelta
assert_(np.can_cast('i8', 'm8', casting='same_kind'))
assert_(np.can_cast('i8', 'm8', casting='safe'))
assert_(np.can_cast('i4', 'm8', casting='same_kind'))
assert_(np.can_cast('i4', 'm8', casting='safe'))
assert_(np.can_cast('u4', 'm8', casting='same_kind'))
assert_(np.can_cast('u4', 'm8', casting='safe'))
# Cannot cast safely from unsigned integer of the same size, which
# could overflow
assert_(np.can_cast('u8', 'm8', casting='same_kind'))
assert_(not np.can_cast('u8', 'm8', casting='safe'))
# Cannot cast safely/same_kind from float to timedelta
assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
assert_(not np.can_cast('f4', 'm8', casting='safe'))
# Cannot cast safely/same_kind from integer to datetime
assert_(not np.can_cast('i8', 'M8', casting='same_kind'))
assert_(not np.can_cast('i8', 'M8', casting='safe'))
# Cannot cast safely/same_kind from bool to datetime
assert_(not np.can_cast('b1', 'M8', casting='same_kind'))
assert_(not np.can_cast('b1', 'M8', casting='safe'))
# Can cast safely/same_kind from bool to timedelta
assert_(np.can_cast('b1', 'm8', casting='same_kind'))
assert_(np.can_cast('b1', 'm8', casting='safe'))
# Can cast datetime safely from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))
# Cannot cast timedelta safely from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))
# Can cast datetime same_kind from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))
# Can't cast timedelta same_kind from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))
# Can cast datetime same_kind across the date/time boundary
assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))
# Can cast timedelta same_kind across the date/time boundary
assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))
# Cannot cast safely if the integer multiplier doesn't divide
assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))
assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))
# But can cast same_kind
assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))
# Can cast safely if the integer multiplier does divide
assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))
# We can always cast types with generic units (corresponding to NaT) to
# more specific types
assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8', 'm8[h]', casting='safe'))
assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))
assert_(np.can_cast('M8', 'M8[h]', casting='safe'))
        # but not the other way around
        assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))
        assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))
        assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
        assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
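    # Illustrative sketch (added here) restating the divisibility rule
    # above: 6h ticks split evenly into 3h ticks, 7h ticks do not.
    def test_casting_divisibility_sketch(self):
        assert np.can_cast('M8[6h]', 'M8[3h]', casting='safe')
        assert not np.can_cast('M8[7h]', 'M8[3h]', casting='safe')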
def test_datetime_prefix_conversions(self):
# regression tests related to gh-19631;
# test metric prefixes from seconds down to
# attoseconds for bidirectional conversions
smaller_units = ['M8[7000ms]',
'M8[2000us]',
'M8[1000ns]',
'M8[5000ns]',
'M8[2000ps]',
'M8[9000fs]',
'M8[1000as]',
'M8[2000000ps]',
'M8[1000000as]',
'M8[2000000000ps]',
'M8[1000000000as]']
larger_units = ['M8[7s]',
'M8[2ms]',
'M8[us]',
'M8[5us]',
'M8[2ns]',
'M8[9ps]',
'M8[1fs]',
'M8[2us]',
'M8[1ps]',
'M8[2ms]',
'M8[1ns]']
for larger_unit, smaller_unit in zip(larger_units, smaller_units):
assert np.can_cast(larger_unit, smaller_unit, casting='safe')
assert np.can_cast(smaller_unit, larger_unit, casting='safe')
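    # Illustrative sketch (added here): the prefixed pairs above are exact
    # equivalences, e.g. a 7000 ms tick is the same size as a 7 s tick, so
    # the cast is safe in both directions.
    def test_prefix_equivalence_sketch(self):
        assert np.can_cast('M8[7000ms]', 'M8[7s]', casting='safe')
        assert np.can_cast('M8[7s]', 'M8[7000ms]', casting='safe')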
@pytest.mark.parametrize("unit", [
"s", "ms", "us", "ns", "ps", "fs", "as"])
def test_prohibit_negative_datetime(self, unit):
with assert_raises(TypeError):
np.array([1], dtype=f"M8[-1{unit}]")
def test_compare_generic_nat(self):
# regression tests for gh-6452
assert_(np.datetime64('NaT') !=
np.datetime64('2000') + np.timedelta64('NaT'))
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_datetime_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.datetime64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_timedelta_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.timedelta64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("arr, expected", [
# the example provided in gh-12629
(['NaT', 1, 2, 3],
[1, 2, 3, 'NaT']),
# multiple NaTs
(['NaT', 9, 'NaT', -707],
[-707, 9, 'NaT', 'NaT']),
# this sort explores another code path for NaT
([1, -2, 3, 'NaT'],
[-2, 1, 3, 'NaT']),
# 2-D array
([[51, -220, 'NaT'],
[-17, 'NaT', -90]],
[[-220, 51, 'NaT'],
[-90, -17, 'NaT']]),
])
@pytest.mark.parametrize("dtype", [
'M8[ns]', 'M8[us]',
'm8[ns]', 'm8[us]'])
def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
# fix for gh-12629 and gh-15063; NaT sorting to end of array
arr = np.array(arr, dtype=dtype)
expected = np.array(expected, dtype=dtype)
arr.sort()
assert_equal(arr, expected)
def test_datetime_scalar_construction(self):
# Construct with different units
assert_equal(np.datetime64('1950-03-12', 'D'),
np.datetime64('1950-03-12'))
assert_equal(np.datetime64('1950-03-12T13', 's'),
np.datetime64('1950-03-12T13', 'm'))
# Default construction means NaT
assert_equal(np.datetime64(), np.datetime64('NaT'))
# Some basic strings and repr
assert_equal(str(np.datetime64('NaT')), 'NaT')
assert_equal(repr(np.datetime64('NaT')),
"numpy.datetime64('NaT')")
assert_equal(str(np.datetime64('2011-02')), '2011-02')
assert_equal(repr(np.datetime64('2011-02')),
"numpy.datetime64('2011-02')")
# None gets constructed as NaT
assert_equal(np.datetime64(None), np.datetime64('NaT'))
# Default construction of NaT is in generic units
assert_equal(np.datetime64().dtype, np.dtype('M8'))
assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))
# Construction from integers requires a specified unit
assert_raises(ValueError, np.datetime64, 17)
# When constructing from a scalar or zero-dimensional array,
# it either keeps the units or you can override them.
a = np.datetime64('2000-03-18T16', 'h')
b = np.array('2000-03-18T16', dtype='M8[h]')
assert_equal(a.dtype, np.dtype('M8[h]'))
assert_equal(b.dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a), a)
assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(b), a)
assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a, 's'), a)
assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]'))
assert_equal(np.datetime64(b, 's'), a)
assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))
# Construction from datetime.date
assert_equal(np.datetime64('1945-03-25'),
np.datetime64(datetime.date(1945, 3, 25)))
assert_equal(np.datetime64('2045-03-25', 'D'),
np.datetime64(datetime.date(2045, 3, 25), 'D'))
# Construction from datetime.datetime
assert_equal(np.datetime64('1980-01-25T14:36:22.5'),
np.datetime64(datetime.datetime(1980, 1, 25,
14, 36, 22, 500000)))
# Construction with time units from a date is okay
assert_equal(np.datetime64('1920-03-13', 'h'),
np.datetime64('1920-03-13T00'))
assert_equal(np.datetime64('1920-03', 'm'),
np.datetime64('1920-03-01T00:00'))
assert_equal(np.datetime64('1920', 's'),
np.datetime64('1920-01-01T00:00:00'))
assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),
np.datetime64('2045-03-25T00:00:00.000'))
# Construction with date units from a datetime is also okay
assert_equal(np.datetime64('1920-03-13T18', 'D'),
np.datetime64('1920-03-13'))
assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),
np.datetime64('1920-03'))
assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),
np.datetime64('1920'))
def test_datetime_scalar_construction_timezone(self):
# verify that supplying an explicit timezone works, but is deprecated
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00Z'),
np.datetime64('2000-01-01T00'))
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00-08'),
np.datetime64('2000-01-01T08'))
def test_datetime_array_find_type(self):
dt = np.datetime64('1970-01-01', 'M')
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('M8[M]'))
# at the moment, we don't automatically convert these to datetime64
dt = datetime.date(1970, 1, 1)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
# find "supertype" for non-dates and dates
b = np.bool_(True)
dm = np.datetime64('1970-01-01', 'M')
d = datetime.date(1970, 1, 1)
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dm])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([b, d])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([d, d]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[D]'))
arr = np.array([dt, dt]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[us]'))
@pytest.mark.parametrize("unit", [
# test all date / time units and use
# "generic" to select generic unit
("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
("s"), ("ms"), ("us"), ("ns"), ("ps"),
("fs"), ("as"), ("generic") ])
def test_timedelta_np_int_construction(self, unit):
# regression test for gh-7617
if unit != "generic":
assert_equal(np.timedelta64(np.int64(123), unit),
np.timedelta64(123, unit))
else:
assert_equal(np.timedelta64(np.int64(123)),
np.timedelta64(123))
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
np.timedelta64(1, 'W'))
assert_equal(np.timedelta64(120, 's'),
np.timedelta64(2, 'm'))
# Default construction means 0
assert_equal(np.timedelta64(), np.timedelta64(0))
# None gets constructed as NaT
assert_equal(np.timedelta64(None), np.timedelta64('NaT'))
# Some basic strings and repr
assert_equal(str(np.timedelta64('NaT')), 'NaT')
assert_equal(repr(np.timedelta64('NaT')),
"numpy.timedelta64('NaT')")
assert_equal(str(np.timedelta64(3, 's')), '3 seconds')
assert_equal(repr(np.timedelta64(-3, 's')),
"numpy.timedelta64(-3,'s')")
assert_equal(repr(np.timedelta64(12)),
"numpy.timedelta64(12)")
# Construction from an integer produces generic units
assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))
# When constructing from a scalar or zero-dimensional array,
# it either keeps the units or you can override them.
a = np.timedelta64(2, 'h')
b = np.array(2, dtype='m8[h]')
assert_equal(a.dtype, np.dtype('m8[h]'))
assert_equal(b.dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(a), a)
assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(b), a)
assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(a, 's'), a)
assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))
assert_equal(np.timedelta64(b, 's'), a)
assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))
# Construction from datetime.timedelta
assert_equal(np.timedelta64(5, 'D'),
np.timedelta64(datetime.timedelta(days=5)))
assert_equal(np.timedelta64(102347621, 's'),
np.timedelta64(datetime.timedelta(seconds=102347621)))
assert_equal(np.timedelta64(-10234760000, 'us'),
np.timedelta64(datetime.timedelta(
microseconds=-10234760000)))
assert_equal(np.timedelta64(10234760000, 'us'),
np.timedelta64(datetime.timedelta(
microseconds=10234760000)))
assert_equal(np.timedelta64(1023476, 'ms'),
np.timedelta64(datetime.timedelta(milliseconds=1023476)))
assert_equal(np.timedelta64(10, 'm'),
np.timedelta64(datetime.timedelta(minutes=10)))
assert_equal(np.timedelta64(281, 'h'),
np.timedelta64(datetime.timedelta(hours=281)))
assert_equal(np.timedelta64(28, 'W'),
np.timedelta64(datetime.timedelta(weeks=28)))
# Cannot construct across nonlinear time unit boundaries
a = np.timedelta64(3, 's')
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = np.timedelta64(6, 'M')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'h')
a = np.timedelta64(1, 'Y')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'm')
a = datetime.timedelta(seconds=3)
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = datetime.timedelta(weeks=3)
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = datetime.timedelta()
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
def test_timedelta_object_array_conversion(self):
# Regression test for gh-11096
inputs = [datetime.timedelta(28),
datetime.timedelta(30),
datetime.timedelta(31)]
expected = np.array([28, 30, 31], dtype='timedelta64[D]')
actual = np.array(inputs, dtype='timedelta64[D]')
assert_equal(expected, actual)
def test_timedelta_0_dim_object_array_conversion(self):
# Regression test for gh-11151
test = np.array(datetime.timedelta(seconds=20))
actual = test.astype(np.timedelta64)
        # expected value from the array constructor workaround
        # described in the issue above
expected = np.array(datetime.timedelta(seconds=20),
np.timedelta64)
assert_equal(actual, expected)
def test_timedelta_nat_format(self):
# gh-17552
assert_equal('NaT', '{0}'.format(np.timedelta64('nat')))
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
assert_equal(np.datetime64('2010').dtype,
np.dtype('M8[Y]'))
assert_equal(np.datetime64('2010-03').dtype,
np.dtype('M8[M]'))
assert_equal(np.datetime64('2010-03-12').dtype,
np.dtype('M8[D]'))
assert_equal(np.datetime64('2010-03-12T17').dtype,
np.dtype('M8[h]'))
assert_equal(np.datetime64('2010-03-12T17:15').dtype,
np.dtype('M8[m]'))
assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,
np.dtype('M8[s]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.1234567890123').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.12345678901234').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.123456789012345').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.1234567890123456').dtype,
np.dtype('M8[as]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.12345678901234567').dtype,
np.dtype('M8[as]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.123456789012345678').dtype,
np.dtype('M8[as]'))
# Python date object
assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,
np.dtype('M8[D]'))
# Python datetime object
assert_equal(np.datetime64(
datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,
np.dtype('M8[us]'))
# 'today' special value
assert_equal(np.datetime64('today').dtype,
np.dtype('M8[D]'))
# 'now' special value
assert_equal(np.datetime64('now').dtype,
np.dtype('M8[s]'))
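    # Illustrative sketch (added here): the detected unit scales with the
    # number of fractional-second digits, in groups of three (1-3 digits
    # parse as ms, 4-6 as us, and so on).
    def test_fractional_digit_unit_sketch(self):
        assert (np.datetime64('2010-03-12T17:15:08.1').dtype ==
                np.dtype('M8[ms]'))
        assert (np.datetime64('2010-03-12T17:15:08.1234').dtype ==
                np.dtype('M8[us]'))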
def test_datetime_nat_casting(self):
a = np.array('NaT', dtype='M8[D]')
b = np.datetime64('NaT', '[D]')
# Arrays
assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))
assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))
assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))
assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))
assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))
# Scalars -> Scalars
assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
# Arrays -> Scalars
assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
# NaN -> NaT
nan = np.array([np.nan] * 8)
fnan = nan.astype('f')
lnan = nan.astype('g')
cnan = nan.astype('D')
cfnan = nan.astype('F')
clnan = nan.astype('G')
nat = np.array([np.datetime64('NaT')] * 8)
assert_equal(nan.astype('M8[ns]'), nat)
assert_equal(fnan.astype('M8[ns]'), nat)
assert_equal(lnan.astype('M8[ns]'), nat)
assert_equal(cnan.astype('M8[ns]'), nat)
assert_equal(cfnan.astype('M8[ns]'), nat)
assert_equal(clnan.astype('M8[ns]'), nat)
nat = np.array([np.timedelta64('NaT')] * 8)
assert_equal(nan.astype('timedelta64[ns]'), nat)
assert_equal(fnan.astype('timedelta64[ns]'), nat)
assert_equal(lnan.astype('timedelta64[ns]'), nat)
assert_equal(cnan.astype('timedelta64[ns]'), nat)
assert_equal(cfnan.astype('timedelta64[ns]'), nat)
assert_equal(clnan.astype('timedelta64[ns]'), nat)
def test_days_creation(self):
        assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
                     (1600-1970)*365 - (1972-1600)//4 + 3 - 365)
        assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
                     (1600-1970)*365 - (1972-1600)//4 + 3)
        assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
                     (1600-1970)*365 - (1972-1600)//4 + 3 + 366)
assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4)
assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4 + 365)
assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4)
assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366)
assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3)
assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
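    # Worked example (added here) of the arithmetic above: 2000-03-01 is 30
    # years of 365 days, plus (2000-1972)//4 = 7 leap days for 1972..1996,
    # plus 31 days of January and 29 days of the (leap) February 2000.
    def test_days_creation_sketch(self):
        expected = (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29
        assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
                     expected)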
def test_days_to_pydate(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
datetime.date(1599, 1, 1))
assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
datetime.date(1600, 1, 1))
assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
datetime.date(1601, 1, 1))
assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
datetime.date(1900, 1, 1))
assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
datetime.date(1901, 1, 1))
assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
datetime.date(2000, 1, 1))
assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
datetime.date(2001, 1, 1))
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
datetime.date(1600, 2, 29))
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
datetime.date(1600, 3, 1))
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
datetime.date(2001, 3, 22))
def test_dtype_comparison(self):
assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
def test_pydatetime_creation(self):
a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
assert_equal(a[0], a[1])
        # Will fail if the date changes at exactly the wrong moment
a = np.array(['today', datetime.date.today()], dtype='M8[D]')
assert_equal(a[0], a[1])
# datetime.datetime.now() returns local time, not UTC
#a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')
#assert_equal(a[0], a[1])
# we can give a datetime.date time units
assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
np.array(np.datetime64('1960-03-12T00:00:00')))
def test_datetime_string_conversion(self):
a = ['2011-03-16', '1920-01-01', '2013-05-19']
str_a = np.array(a, dtype='S')
uni_a = np.array(a, dtype='U')
dt_a = np.array(a, dtype='M')
# String to datetime
assert_equal(dt_a, str_a.astype('M'))
assert_equal(dt_a.dtype, str_a.astype('M').dtype)
dt_b = np.empty_like(dt_a)
dt_b[...] = str_a
assert_equal(dt_a, dt_b)
# Datetime to string
assert_equal(str_a, dt_a.astype('S0'))
str_b = np.empty_like(str_a)
str_b[...] = dt_a
assert_equal(str_a, str_b)
# Unicode to datetime
assert_equal(dt_a, uni_a.astype('M'))
assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
dt_b = np.empty_like(dt_a)
dt_b[...] = uni_a
assert_equal(dt_a, dt_b)
# Datetime to unicode
assert_equal(uni_a, dt_a.astype('U'))
uni_b = np.empty_like(uni_a)
uni_b[...] = dt_a
assert_equal(uni_a, uni_b)
# Datetime to long string - gh-9712
assert_equal(str_a, dt_a.astype((np.string_, 128)))
str_b = np.empty(str_a.shape, dtype=(np.string_, 128))
str_b[...] = dt_a
assert_equal(str_a, str_b)
@pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
def test_time_byteswapping(self, time_dtype):
times = np.array(["2017", "NaT"], dtype=time_dtype)
times_swapped = times.astype(times.dtype.newbyteorder())
assert_array_equal(times, times_swapped)
unswapped = times_swapped.view(np.int64).newbyteorder()
assert_array_equal(unswapped, times.view(np.int64))
@pytest.mark.parametrize(["time1", "time2"],
[("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")])
def test_time_byteswapped_cast(self, time1, time2):
dtype1 = np.dtype(time1)
dtype2 = np.dtype(time2)
times = np.array(["2017", "NaT"], dtype=dtype1)
expected = times.astype(dtype2)
# Test that every byte-swapping combination also returns the same
# results (previous tests check that this comparison works fine).
res = times.astype(dtype1.newbyteorder()).astype(dtype2)
assert_array_equal(res, expected)
res = times.astype(dtype2.newbyteorder())
assert_array_equal(res, expected)
res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder())
assert_array_equal(res, expected)
@pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
@pytest.mark.parametrize("str_dtype", ["U", "S"])
def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):
times = np.array(["2017", "NaT"], dtype=time_dtype)
# Unfortunately, timedelta does not roundtrip:
from_strings = np.array(["2017", "NaT"], dtype=str_dtype)
to_strings = times.astype(str_dtype) # assume this is correct
# Check that conversion from times to string works if src is swapped:
times_swapped = times.astype(times.dtype.newbyteorder())
res = times_swapped.astype(str_dtype)
assert_array_equal(res, to_strings)
# And also if both are swapped:
res = times_swapped.astype(to_strings.dtype.newbyteorder())
assert_array_equal(res, to_strings)
# only destination is swapped:
res = times.astype(to_strings.dtype.newbyteorder())
assert_array_equal(res, to_strings)
# Check that conversion from string to times works if src is swapped:
from_strings_swapped = from_strings.astype(
from_strings.dtype.newbyteorder())
res = from_strings_swapped.astype(time_dtype)
assert_array_equal(res, times)
# And if both are swapped:
res = from_strings_swapped.astype(times.dtype.newbyteorder())
assert_array_equal(res, times)
# Only destination is swapped:
res = from_strings.astype(times.dtype.newbyteorder())
assert_array_equal(res, times)
def test_datetime_array_str(self):
a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
assert_equal(np.array2string(a, separator=', ',
formatter={'datetime': lambda x:
"'%s'" % np.datetime_as_string(x, timezone='UTC')}),
"['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
# Check that one NaT doesn't corrupt subsequent entries
a = np.array(['2010', 'NaT', '2030']).astype('M')
assert_equal(str(a), "['2010' 'NaT' '2030']")
def test_timedelta_array_str(self):
a = np.array([-1, 0, 100], dtype='m')
assert_equal(str(a), "[ -1 0 100]")
a = np.array(['NaT', 'NaT'], dtype='m')
assert_equal(str(a), "['NaT' 'NaT']")
# Check right-alignment with NaTs
a = np.array([-1, 'NaT', 0], dtype='m')
assert_equal(str(a), "[ -1 'NaT' 0]")
a = np.array([-1, 'NaT', 1234567], dtype='m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
# Test with other byteorder:
a = np.array([-1, 'NaT', 1234567], dtype='>m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
a = np.array([-1, 'NaT', 1234567], dtype='<m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
def test_pickle(self):
# Check that pickle roundtripping works
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
dt = np.dtype('M8[7D]')
assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
dt = np.dtype('M8[W]')
assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
scalar = np.datetime64('2016-01-01T00:00:00.000000000')
assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
scalar)
delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
delta)
# Check that loading pickles from 1.6 works
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
def test_setstate(self):
"Verify that datetime dtype __setstate__ can handle bad arguments"
dt = np.dtype('>M8[us]')
assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
def test_dtype_promotion(self):
# datetime <op> datetime computes the metadata gcd
# timedelta <op> timedelta computes the metadata gcd
for mM in ['m', 'M']:
assert_equal(
np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
np.dtype(mM+'8[2Y]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
np.dtype(mM+'8[3Y]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
np.dtype(mM+'8[2M]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
np.dtype(mM+'8[1D]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
np.dtype(mM+'8[s]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
np.dtype(mM+'8[7s]'))
# timedelta <op> timedelta raises when there is no reasonable gcd
assert_raises(TypeError, np.promote_types,
np.dtype('m8[Y]'), np.dtype('m8[D]'))
assert_raises(TypeError, np.promote_types,
np.dtype('m8[M]'), np.dtype('m8[W]'))
# timedelta and float cannot be safely cast with each other
assert_raises(TypeError, np.promote_types, "float32", "m8")
assert_raises(TypeError, np.promote_types, "m8", "float32")
assert_raises(TypeError, np.promote_types, "uint64", "m8")
assert_raises(TypeError, np.promote_types, "m8", "uint64")
# timedelta <op> timedelta may overflow with big unit ranges
assert_raises(OverflowError, np.promote_types,
np.dtype('m8[W]'), np.dtype('m8[fs]'))
assert_raises(OverflowError, np.promote_types,
np.dtype('m8[s]'), np.dtype('m8[as]'))
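    # Illustrative sketch (added here): the promoted unit is the gcd of the
    # operand units, e.g. gcd(12, 15) years = 3 years.
    def test_promotion_gcd_sketch(self):
        res = np.promote_types(np.dtype('m8[12Y]'), np.dtype('m8[15Y]'))
        assert_equal(res, np.dtype('m8[3Y]'))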
def test_cast_overflow(self):
# gh-4486
def cast():
numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
assert_raises(OverflowError, cast)
def cast2():
numpy.datetime64("2014").astype("<M8[fs]")
assert_raises(OverflowError, cast2)
def test_pyobject_roundtrip(self):
# All datetime types should be able to roundtrip through object
a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,
-1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],
dtype=np.int64)
# With date units
for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:
b = a.copy().view(dtype=unit)
b[0] = '-0001-01-01'
b[1] = '-0001-12-31'
b[2] = '0000-01-01'
b[3] = '0001-01-01'
b[4] = '1969-12-31'
b[5] = '1970-01-01'
b[6] = '9999-12-31'
b[7] = '10000-01-01'
b[8] = 'NaT'
assert_equal(b.astype(object).astype(unit), b,
"Error roundtripping unit %s" % unit)
# With time units
for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',
'M8[300as]', 'M8[20us]']:
b = a.copy().view(dtype=unit)
b[0] = '-0001-01-01T00'
b[1] = '-0001-12-31T00'
b[2] = '0000-01-01T00'
b[3] = '0001-01-01T00'
b[4] = '1969-12-31T23:59:59.999999'
b[5] = '1970-01-01T00'
b[6] = '9999-12-31T23:59:59.999999'
b[7] = '10000-01-01T00'
b[8] = 'NaT'
assert_equal(b.astype(object).astype(unit), b,
"Error roundtripping unit %s" % unit)
def test_month_truncation(self):
        # Make sure that months truncate correctly
assert_equal(np.array('1945-03-01', dtype='M8[M]'),
np.array('1945-03-31', dtype='M8[M]'))
assert_equal(np.array('1969-11-01', dtype='M8[M]'),
np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1969-12-01', dtype='M8[M]'),
np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1970-01-01', dtype='M8[M]'),
np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1980-02-01', dtype='M8[M]'),
np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))
def test_different_unit_comparison(self):
# Check some years with date units
for unit1 in ['Y', 'M', 'D']:
dt1 = np.dtype('M8[%s]' % unit1)
for unit2 in ['Y', 'M', 'D']:
dt2 = np.dtype('M8[%s]' % unit2)
assert_equal(np.array('1945', dtype=dt1),
np.array('1945', dtype=dt2))
assert_equal(np.array('1970', dtype=dt1),
np.array('1970', dtype=dt2))
assert_equal(np.array('9999', dtype=dt1),
np.array('9999', dtype=dt2))
assert_equal(np.array('10000', dtype=dt1),
np.array('10000-01-01', dtype=dt2))
assert_equal(np.datetime64('1945', unit1),
np.datetime64('1945', unit2))
assert_equal(np.datetime64('1970', unit1),
np.datetime64('1970', unit2))
assert_equal(np.datetime64('9999', unit1),
np.datetime64('9999', unit2))
assert_equal(np.datetime64('10000', unit1),
np.datetime64('10000-01-01', unit2))
# Check some datetimes with time units
for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:
dt1 = np.dtype('M8[%s]' % unit1)
for unit2 in ['h', 'm', 's', 'ms', 'us']:
dt2 = np.dtype('M8[%s]' % unit2)
assert_equal(np.array('1945-03-12T18', dtype=dt1),
np.array('1945-03-12T18', dtype=dt2))
assert_equal(np.array('1970-03-12T18', dtype=dt1),
np.array('1970-03-12T18', dtype=dt2))
assert_equal(np.array('9999-03-12T18', dtype=dt1),
np.array('9999-03-12T18', dtype=dt2))
assert_equal(np.array('10000-01-01T00', dtype=dt1),
np.array('10000-01-01T00', dtype=dt2))
assert_equal(np.datetime64('1945-03-12T18', unit1),
np.datetime64('1945-03-12T18', unit2))
assert_equal(np.datetime64('1970-03-12T18', unit1),
np.datetime64('1970-03-12T18', unit2))
assert_equal(np.datetime64('9999-03-12T18', unit1),
np.datetime64('9999-03-12T18', unit2))
assert_equal(np.datetime64('10000-01-01T00', unit1),
np.datetime64('10000-01-01T00', unit2))
# Check some days with units that won't overflow
for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:
dt1 = np.dtype('M8[%s]' % unit1)
for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:
dt2 = np.dtype('M8[%s]' % unit2)
assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),
np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),
casting='unsafe'))
assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),
np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),
casting='unsafe'))
# Shouldn't be able to compare datetime and timedelta
# TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by
# default is needed to properly catch this kind of thing...
a = np.array('2012-12-21', dtype='M8[D]')
b = np.array(3, dtype='m8[D]')
#assert_raises(TypeError, np.less, a, b)
assert_raises(TypeError, np.less, a, b, casting='same_kind')
def test_datetime_like(self):
a = np.array([3], dtype='m8[4D]')
b = np.array(['2012-12-21'], dtype='M8[D]')
assert_equal(np.ones_like(a).dtype, a.dtype)
assert_equal(np.zeros_like(a).dtype, a.dtype)
assert_equal(np.empty_like(a).dtype, a.dtype)
assert_equal(np.ones_like(b).dtype, b.dtype)
assert_equal(np.zeros_like(b).dtype, b.dtype)
assert_equal(np.empty_like(b).dtype, b.dtype)
def test_datetime_unary(self):
for tda, tdb, tdzero, tdone, tdmone in \
[
# One-dimensional arrays
(np.array([3], dtype='m8[D]'),
np.array([-3], dtype='m8[D]'),
np.array([0], dtype='m8[D]'),
np.array([1], dtype='m8[D]'),
np.array([-1], dtype='m8[D]')),
# NumPy scalars
(np.timedelta64(3, '[D]'),
np.timedelta64(-3, '[D]'),
np.timedelta64(0, '[D]'),
np.timedelta64(1, '[D]'),
np.timedelta64(-1, '[D]'))]:
# negative ufunc
assert_equal(-tdb, tda)
assert_equal((-tdb).dtype, tda.dtype)
assert_equal(np.negative(tdb), tda)
assert_equal(np.negative(tdb).dtype, tda.dtype)
# positive ufunc
assert_equal(np.positive(tda), tda)
assert_equal(np.positive(tda).dtype, tda.dtype)
assert_equal(np.positive(tdb), tdb)
assert_equal(np.positive(tdb).dtype, tdb.dtype)
# absolute ufunc
assert_equal(np.absolute(tdb), tda)
assert_equal(np.absolute(tdb).dtype, tda.dtype)
# sign ufunc
assert_equal(np.sign(tda), tdone)
assert_equal(np.sign(tdb), tdmone)
assert_equal(np.sign(tdzero), tdzero)
assert_equal(np.sign(tda).dtype, tda.dtype)
            # The ufuncs always produce native-endian results
            # (minimal completion of the truncated assertion)
            swapped = tdb.astype(tdb.dtype.newbyteorder())
            assert_(np.negative(swapped).dtype.isnative)
def test_datetime_add(self):
for dta, dtb, dtc, dtnat, tda, tdb, tdc in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array(['2012-12-24'], dtype='M8[D]'),
np.array(['2012-12-21T11'], dtype='M8[h]'),
np.array(['NaT'], dtype='M8[D]'),
np.array([3], dtype='m8[D]'),
np.array([11], dtype='m8[h]'),
np.array([3*24 + 11], dtype='m8[h]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.datetime64('2012-12-24', '[D]'),
np.datetime64('2012-12-21T11', '[h]'),
np.datetime64('NaT', '[D]'),
np.timedelta64(3, '[D]'),
np.timedelta64(11, '[h]'),
np.timedelta64(3*24 + 11, '[h]'))]:
# m8 + m8
assert_equal(tda + tdb, tdc)
assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))
# m8 + bool
assert_equal(tdb + True, tdb + 1)
assert_equal((tdb + True).dtype, np.dtype('m8[h]'))
# m8 + int
assert_equal(tdb + 3*24, tdc)
assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]'))
# bool + m8
assert_equal(False + tdb, tdb)
assert_equal((False + tdb).dtype, np.dtype('m8[h]'))
# int + m8
assert_equal(3*24 + tdb, tdc)
assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]'))
# M8 + bool
assert_equal(dta + True, dta + 1)
assert_equal(dtnat + True, dtnat)
assert_equal((dta + True).dtype, np.dtype('M8[D]'))
# M8 + int
assert_equal(dta + 3, dtb)
assert_equal(dtnat + 3, dtnat)
assert_equal((dta + 3).dtype, np.dtype('M8[D]'))
# bool + M8
assert_equal(False + dta, dta)
assert_equal(False + dtnat, dtnat)
assert_equal((False + dta).dtype, np.dtype('M8[D]'))
# int + M8
assert_equal(3 + dta, dtb)
assert_equal(3 + dtnat, dtnat)
assert_equal((3 + dta).dtype, np.dtype('M8[D]'))
# M8 + m8
assert_equal(dta + tda, dtb)
assert_equal(dtnat + tda, dtnat)
assert_equal((dta + tda).dtype, np.dtype('M8[D]'))
# m8 + M8
assert_equal(tda + dta, dtb)
assert_equal(tda + dtnat, dtnat)
assert_equal((tda + dta).dtype, np.dtype('M8[D]'))
# In M8 + m8, the result goes to higher precision
assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)
assert_equal(np.add(dta, tdb, casting='unsafe').dtype,
np.dtype('M8[h]'))
assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)
assert_equal(np.add(tdb, dta, casting='unsafe').dtype,
np.dtype('M8[h]'))
# M8 + M8
assert_raises(TypeError, np.add, dta, dtb)
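    # Illustrative sketch (added here): M8 + m8 promotes to the finer of
    # the two units (days + hours -> hours), matching the 'unsafe' add
    # exercised above.
    def test_add_promotion_sketch(self):
        res = np.add(np.datetime64('2012-12-21', 'D'),
                     np.timedelta64(11, 'h'), casting='unsafe')
        assert_equal(res, np.datetime64('2012-12-21T11', 'h'))
        assert_equal(res.dtype, np.dtype('M8[h]'))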
def test_datetime_subtract(self):
for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array(['2012-12-24'], dtype='M8[D]'),
np.array(['1940-12-24'], dtype='M8[D]'),
np.array(['1940-12-24T00'], dtype='M8[h]'),
np.array(['1940-12-23T13'], dtype='M8[h]'),
np.array(['NaT'], dtype='M8[D]'),
np.array([3], dtype='m8[D]'),
np.array([11], dtype='m8[h]'),
np.array([3*24 - 11], dtype='m8[h]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.datetime64('2012-12-24', '[D]'),
np.datetime64('1940-12-24', '[D]'),
np.datetime64('1940-12-24T00', '[h]'),
np.datetime64('1940-12-23T13', '[h]'),
np.datetime64('NaT', '[D]'),
np.timedelta64(3, '[D]'),
np.timedelta64(11, '[h]'),
np.timedelta64(3*24 - 11, '[h]'))]:
# m8 - m8
assert_equal(tda - tdb, tdc)
assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))
assert_equal(tdb - tda, -tdc)
assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))
# m8 - bool
assert_equal(tdc - True, tdc - 1)
assert_equal((tdc - True).dtype, np.dtype('m8[h]'))
# m8 - int
assert_equal(tdc - 3*24, -tdb)
assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]'))
            # bool - m8
assert_equal(False - tdb, -tdb)
assert_equal((False - tdb).dtype, np.dtype('m8[h]'))
# int - m8
assert_equal(3*24 - tdb, tdc)
assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]'))
# M8 - bool
assert_equal(dtb - True, dtb - 1)
assert_equal(dtnat - True, dtnat)
assert_equal((dtb - True).dtype, np.dtype('M8[D]'))
# M8 - int
assert_equal(dtb - 3, dta)
assert_equal(dtnat - 3, dtnat)
assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))
# M8 - m8
assert_equal(dtb - tda, dta)
assert_equal(dtnat - tda, dtnat)
assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))
# In M8 - m8, the result goes to higher precision
assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)
assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,
np.dtype('M8[h]'))
            # M8 - M8 with different units goes to higher precision
assert_equal(np.subtract(dtc, dtd, casting='unsafe'),
np.timedelta64(0, 'h'))
assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,
np.dtype('m8[h]'))
assert_equal(np.subtract(dtd, dtc, casting='unsafe'),
np.timedelta64(0, 'h'))
assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,
np.dtype('m8[h]'))
# m8 - M8
assert_raises(TypeError, np.subtract, tda, dta)
# bool - M8
assert_raises(TypeError, np.subtract, False, dta)
# int - M8
assert_raises(TypeError, np.subtract, 3, dta)
def test_datetime_multiply(self):
for dta, tda, tdb, tdc in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array([6], dtype='m8[h]'),
np.array([9], dtype='m8[h]'),
np.array([12], dtype='m8[h]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.timedelta64(6, '[h]'),
np.timedelta64(9, '[h]'),
np.timedelta64(12, '[h]'))]:
# m8 * int
assert_equal(tda * 2, tdc)
assert_equal((tda * 2).dtype, np.dtype('m8[h]'))
# int * m8
assert_equal(2 * tda, tdc)
assert_equal((2 * tda).dtype, np.dtype('m8[h]'))
# m8 * float
assert_equal(tda * 1.5, tdb)
assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))
# float * m8
assert_equal(1.5 * tda, tdb)
assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))
# m8 * m8
assert_raises(TypeError, np.multiply, tda, tdb)
# m8 * M8
assert_raises(TypeError, np.multiply, dta, tda)
# M8 * m8
assert_raises(TypeError, np.multiply, tda, dta)
# M8 * int
assert_raises(TypeError, np.multiply, dta, 2)
# int * M8
assert_raises(TypeError, np.multiply, 2, dta)
# M8 * float
assert_raises(TypeError, np.multiply, dta, 1.5)
# float * M8
assert_raises(TypeError, np.multiply, 1.5, dta)
# NaTs
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in multiply")
nat = np.timedelta64('NaT')
def check(a, b, res):
assert_equal(a * b, res)
assert_equal(b * a, res)
for tp in (int, float):
check(nat, tp(2), nat)
check(nat, tp(0), nat)
for f in (float('inf'), float('nan')):
check(np.timedelta64(1), f, nat)
check(np.timedelta64(0), f, nat)
check(nat, f, nat)
@pytest.mark.parametrize("op1, op2, exp", [
# m8 same units round down
(np.timedelta64(7, 's'),
np.timedelta64(4, 's'),
1),
# m8 same units round down with negative
(np.timedelta64(7, 's'),
np.timedelta64(-4, 's'),
-2),
# m8 same units negative no round down
(np.timedelta64(8, 's'),
np.timedelta64(-4, 's'),
-2),
# m8 different units
(np.timedelta64(1, 'm'),
np.timedelta64(31, 's'),
1),
# m8 generic units
(np.timedelta64(1890),
np.timedelta64(31),
60),
# Y // M works
(np.timedelta64(2, 'Y'),
np.timedelta64('13', 'M'),
1),
# handle 1D arrays
(np.array([1, 2, 3], dtype='m8'),
np.array([2], dtype='m8'),
np.array([0, 1, 1], dtype=np.int64)),
])
def test_timedelta_floor_divide(self, op1, op2, exp):
assert_equal(op1 // op2, exp)
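    # Illustrative sketch (added here): timedelta floor division rounds
    # toward negative infinity, exactly like Python integers.
    def test_floor_divide_rounding_sketch(self):
        assert np.timedelta64(7, 's') // np.timedelta64(-4, 's') == -2
        assert 7 // -4 == -2  # the matching Python-integer behaviour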
@pytest.mark.parametrize("op1, op2", [
# div by 0
(np.timedelta64(10, 'us'),
np.timedelta64(0, 'us')),
# div with NaT
(np.timedelta64('NaT'),
np.timedelta64(50, 'us')),
# special case for int64 min
# in integer floor division
(np.timedelta64(np.iinfo(np.int64).min),
np.timedelta64(-1)),
])
def test_timedelta_floor_div_warnings(self, op1, op2):
with assert_warns(RuntimeWarning):
actual = op1 // op2
assert_equal(actual, 0)
assert_equal(actual.dtype, np.int64)
@pytest.mark.parametrize("val1, val2", [
# the smallest integer that can't be represented
# exactly in a double should be preserved if we avoid
# casting to double in floordiv operation
(9007199254740993, 1),
# stress the alternate floordiv code path where
# operand signs don't match and remainder isn't 0
(9007199254740999, -2),
])
def test_timedelta_floor_div_precision(self, val1, val2):
op1 = np.timedelta64(val1)
op2 = np.timedelta64(val2)
actual = op1 // op2
# Python reference integer floor
expected = val1 // val2
assert_equal(actual, expected)
@pytest.mark.parametrize("val1, val2", [
        # years and months can't always be divided unambiguously,
        # so floor division raises for these unit combinations
(np.timedelta64(7, 'Y'),
np.timedelta64(3, 's')),
(np.timedelta64(7, 'M'),
np.timedelta64(1, 'D')),
])
def test_timedelta_floor_div_error(self, val1, val2):
with assert_raises_regex(TypeError, "common metadata divisor"):
val1 // val2
@pytest.mark.parametrize("op1, op2", [
# reuse the test cases from floordiv
(np.timedelta64(7, 's'),
np.timedelta64(4, 's')),
# m8 same units round down with negative
(np.timedelta64(7, 's'),
np.timedelta64(-4, 's')),
# m8 same units negative no round down
(np.timedelta64(8, 's'),
np.timedelta64(-4, 's')),
# m8 different units
(np.timedelta64(1, 'm'),
np.timedelta64(31, 's')),
# m8 generic units
(np.timedelta64(1890),
np.timedelta64(31)),
# Y // M works
(np.timedelta64(2, 'Y'),
np.timedelta64('13', 'M')),
# handle 1D arrays
(np.array([1, 2, 3], dtype='m8'),
np.array([2], dtype='m8')),
])
def test_timedelta_divmod(self, op1, op2):
expected = (op1 // op2, op1 % op2)
assert_equal(divmod(op1, op2), expected)
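    # Illustrative sketch (added here): divmod keeps the usual invariant
    # q * b + r == a, so with floor rounding the remainder takes the sign
    # of the divisor.
    def test_divmod_invariant_sketch(self):
        a = np.timedelta64(7, 's')
        b = np.timedelta64(-4, 's')
        q, r = divmod(a, b)
        assert_equal(q, -2)
        assert_equal(r, np.timedelta64(-1, 's'))
        assert_equal(q * b + r, a)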
@pytest.mark.parametrize("op1, op2", [
# reuse cases from floordiv
# div by 0
(np.timedelta64(10, 'us'),
np.timedelta64(0, 'us')),
# div with NaT
(np.timedelta64('NaT'),
np.timedelta64(50, 'us')),
# special case for int64 min
# in integer floor division
(np.timedelta64(np.iinfo(np.int64).min),
np.timedelta64(-1)),
])
def test_timedelta_divmod_warnings(self, op1, op2):
with assert_warns(RuntimeWarning):
expected = (op1 // op2, op1 % op2)
with assert_warns(RuntimeWarning):
actual = divmod(op1, op2)
assert_equal(actual, expected)
def test_datetime_divide(self):
for dta, tda, tdb, tdc, tdd in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array([6], dtype='m8[h]'),
np.array([9], dtype='m8[h]'),
np.array([12], dtype='m8[h]'),
np.array([6], dtype='m8[m]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.timedelta64(6, '[h]'),
np.timedelta64(9, '[h]'),
np.timedelta64(12, '[h]'),
np.timedelta64(6, '[m]'))]:
# m8 / int
assert_equal(tdc / 2, tda)
assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))
# m8 / float
assert_equal(tda / 0.5, tdc)
assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
# m8 / m8
assert_equal(tda / tdb, 6 / 9)
assert_equal(np.divide(tda, tdb), 6 / 9)
assert_equal(np.true_divide(tda, tdb), 6 / 9)
assert_equal(tdb / tda, 9 / 6)
assert_equal((tda / tdb).dtype, np.dtype('f8'))
assert_equal(tda / tdd, 60)
assert_equal(tdd / tda, 1 / 60)
# int / m8
assert_raises(TypeError, np.divide, 2, tdb)
# float / m8
assert_raises(TypeError, np.divide, 0.5, tdb)
# m8 / M8
assert_raises(TypeError, np.divide, dta, tda)
# M8 / m8
assert_raises(TypeError, np.divide, tda, dta)
# M8 / int
assert_raises(TypeError, np.divide, dta, 2)
# int / M8
assert_raises(TypeError, np.divide, 2, dta)
# M8 / float
assert_raises(TypeError, np.divide, dta, 1.5)
# float / M8
assert_raises(TypeError, np.divide, 1.5, dta)
# NaTs
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, r".*encountered in divide")
nat = np.timedelta64('NaT')
for tp in (int, float):
assert_equal(np.timedelta64(1) / tp(0), nat)
assert_equal(np.timedelta64(0) / tp(0), nat)
assert_equal(nat / tp(0), nat)
assert_equal(nat / tp(2), nat)
# Division by inf
assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))
assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))
assert_equal(nat / float('inf'), nat)
# Division by nan
assert_equal(np.timedelta64(1) / float('nan'), nat)
assert_equal(np.timedelta64(0) / float('nan'), nat)
assert_equal(nat / float('nan'), nat)
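    # Sketch (not collected): m8 / m8 drops the unit and yields float64,
    # while m8 / number keeps the timedelta unit, matching the checks above.
    def _sketch_divide_result_types(self):
        td = np.timedelta64(6, 'h')
        assert (td / np.timedelta64(3, 'h')).dtype == np.dtype('f8')
        assert (td / 2).dtype == np.dtype('m8[h]')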
def test_datetime_compare(self):
# Test all the comparison operators
a = np.datetime64('2000-03-12T18:00:00.000000')
b = np.array(['2000-03-12T18:00:00.000000',
'2000-03-12T17:59:59.999999',
'2000-03-12T18:00:00.000001',
'1970-01-11T12:00:00.909090',
'2016-01-11T12:00:00.909090'],
dtype='datetime64[us]')
assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])
assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])
assert_equal(np.less(a, b), [0, 0, 1, 0, 1])
assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])
assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])
assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])
def test_datetime_compare_nat(self):
dt_nat = np.datetime64('NaT', 'D')
dt_other = np.datetime64('2000-01-01')
td_nat = np.timedelta64('NaT', 'h')
td_other = np.timedelta64(1, 'h')
for op in [np.equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
assert_(not op(dt_nat, dt_nat))
assert_(not op(dt_nat, dt_other))
assert_(not op(dt_other, dt_nat))
assert_(not op(td_nat, td_nat))
assert_(not op(td_nat, td_other))
assert_(not op(td_other, td_nat))
assert_(np.not_equal(dt_nat, dt_nat))
assert_(np.not_equal(dt_nat, dt_other))
assert_(np.not_equal(dt_other, dt_nat))
assert_(np.not_equal(td_nat, td_nat))
assert_(np.not_equal(td_nat, td_other))
assert_(np.not_equal(td_other, td_nat))
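    # Sketch (not collected): NaT ordering mirrors IEEE NaN -- all ordered
    # comparisons are False and only not_equal is True, as for np.nan.
    def _sketch_nat_mirrors_nan(self):
        assert not (np.nan == np.nan) and (np.nan != np.nan)
        nat = np.datetime64('NaT')
        assert not (nat == nat) and (nat != nat)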
def test_datetime_minmax(self):
# The metadata of the result should become the GCD
# of the operand metadata
a = np.array('1999-03-12T13', dtype='M8[2m]')
b = np.array('1999-03-12T12', dtype='M8[s]')
assert_equal(np.minimum(a, b), b)
assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
assert_equal(np.fmin(a, b), b)
assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
assert_equal(np.maximum(a, b), a)
assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
assert_equal(np.fmax(a, b), a)
assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
# Viewed as integers, the comparison is opposite because
# of the units chosen
assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
# Interaction with NaT
a = np.array('1999-03-12T13', dtype='M8[2m]')
dtnat = np.array('NaT', dtype='M8[h]')
assert_equal(np.minimum(a, dtnat), dtnat)
assert_equal(np.minimum(dtnat, a), dtnat)
assert_equal(np.maximum(a, dtnat), dtnat)
assert_equal(np.maximum(dtnat, a), dtnat)
assert_equal(np.fmin(dtnat, a), a)
assert_equal(np.fmin(a, dtnat), a)
assert_equal(np.fmax(dtnat, a), a)
assert_equal(np.fmax(a, dtnat), a)
# Also do timedelta
a = np.array(3, dtype='m8[h]')
b = np.array(3*3600 - 3, dtype='m8[s]')
assert_equal(np.minimum(a, b), b)
assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
assert_equal(np.fmin(a, b), b)
assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum(a, b), a)
assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
assert_equal(np.fmax(a, b), a)
assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
# Viewed as integers, the comparison is opposite because
# of the units chosen
assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
# should raise between datetime and timedelta
#
# TODO: Allowing unsafe casting by
# default in ufuncs strikes again... :(
a = np.array(3, dtype='m8[h]')
b = np.array('1999-03-12T12', dtype='M8[s]')
#assert_raises(TypeError, np.minimum, a, b)
#assert_raises(TypeError, np.maximum, a, b)
#assert_raises(TypeError, np.fmin, a, b)
#assert_raises(TypeError, np.fmax, a, b)
assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
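    # Sketch (assumes np.promote_types applies the same unit-GCD rule as the
    # binary ufuncs above; not collected by pytest):
    def _sketch_unit_gcd_promotion(self):
        assert np.promote_types('M8[2m]', 'M8[s]') == np.dtype('M8[s]')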
def test_hours(self):
t = np.ones(3, dtype='M8[s]')
t[0] = 60*60*24 + 60*60*10
assert_(t[0].item().hour == 10)
def test_divisor_conversion_year(self):
assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))
assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))
assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))
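        # (worked arithmetic, assuming the nominal calendar used for divisor
        # conversion: 1 Y == 12 M so Y/4 -> 3M; 1 Y == 52 W so Y/13 -> 4W;
        # 3 Y == 1095 D so 3Y/73 -> 15D)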
def test_divisor_conversion_month(self):
assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))
assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))
assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))
def test_divisor_conversion_week(self):
assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))
assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))
assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))
def test_divisor_conversion_day(self):
assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))
assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))
def test_divisor_conversion_hour(self):
assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))
def test_divisor_conversion_minute(self):
assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))
assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))
def test_divisor_conversion_second(self):
assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))
assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
def test_string_parser_variants(self):
# Allow space instead of 'T' between date and time
assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
# Allow positive years
assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
# Allow negative years
assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
# UTC specifier
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
# Time zone offset
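        # (worked example: '-0130' marks local time 1h30m behind UTC, so
        # 00:32:03-0130 corresponds to 02:02:03 UTC in the first pair below)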
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),
np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('1977-03-02T12:30-0230'),
np.datetime64('1977-03-02T15:00'))
def test_string_parser_error_check(self):
# Arbitrary bad string
assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))
# Character after year must be '-'
assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))
# Cannot have trailing '-'
assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))
# Month must be in range [1,12]
assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))
# Month must have two digits
assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))
# 'Mor' is not a valid month
assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))
# Cannot have trailing '-'
assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))
# Day must be in range [1,len(month)]
assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))
# Cannot have trailing characters
assert_raises(ValueError, np.array, ['1980-02-03%'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 q'],
np.dtype('M8[us]'))
# Hours must be in range [0, 23]
assert_raises(ValueError, np.array, ['1980-02-03 25'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03T25'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 24:01'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03T24:01'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 -1'],
np.dtype('M8[us]'))
# No trailing ':'
assert_raises(ValueError, np.array, ['1980-02-03 01:'],
np.dtype('M8[us]'))
# Minutes must be in range [0, 59]
assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 01:60'],
np.dtype('M8[us]'))
# No trailing ':'
assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],
np.dtype('M8[us]'))
# Seconds must be in range [0, 59]
assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],
np.dtype('M8[us]'))
        # Timezone offset must be within a reasonable range
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],
np.dtype('M8[us]'))
def test_creation_overflow(self):
date = '1980-03-23 20:00:00'
timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
for unit in ['ms', 'us', 'ns']:
timesteps *= 1000
x = np.array([date], dtype='datetime64[%s]' % unit)
assert_equal(timesteps, x[0].astype(np.int64),
err_msg='Datetime conversion error for unit %s' % unit)
assert_equal(x[0].astype(np.int64), 322689600000000000)
# gh-13062
with pytest.raises(OverflowError):
np.datetime64(2**64, 'D')
with pytest.raises(OverflowError):
np.timedelta64(2**64, 'D')
def test_datetime_as_string(self):
# Check all the units with default string conversion
date = '1959-10-13'
datetime = '1959-10-13T12:34:56.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),
'1959')
assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),
'1959-10')
assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),
'1959-10-13')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),
'1959-10-13T12')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),
'1959-10-13T12:34')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),
'1959-10-13T12:34:56')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),
'1959-10-13T12:34:56.789')
for us in ['us', 'μs', b'us']: # check non-ascii and bytes too
assert_equal(np.datetime_as_string(np.datetime64(datetime, us)),
'1959-10-13T12:34:56.789012')
datetime = '1969-12-31T23:34:56.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
'1969-12-31T23:34:56.789012345')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
'1969-12-31T23:34:56.789012345678')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
'1969-12-31T23:34:56.789012345678901')
datetime = '1969-12-31T23:59:57.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
datetime)
datetime = '1970-01-01T00:34:56.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
'1970-01-01T00:34:56.789012345')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
'1970-01-01T00:34:56.789012345678')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
'1970-01-01T00:34:56.789012345678901')
datetime = '1970-01-01T00:00:05.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
datetime)
# String conversion with the unit= parameter
a = np.datetime64('2032-07-18T12:23:34.123456', 'us')
assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),
'2032')
assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),
'2032-07')
assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),
'2032-07-18')
assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),
'2032-07-18')
assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')
assert_equal(np.datetime_as_string(a, unit='m'),
'2032-07-18T12:23')
assert_equal(np.datetime_as_string(a, unit='s'),
'2032-07-18T12:23:34')
assert_equal(np.datetime_as_string(a, unit='ms'),
'2032-07-18T12:23:34.123')
assert_equal(np.datetime_as_string(a, unit='us'),
'2032-07-18T12:23:34.123456')
assert_equal(np.datetime_as_string(a, unit='ns'),
'2032-07-18T12:23:34.123456000')
assert_equal(np.datetime_as_string(a, unit='ps'),
'2032-07-18T12:23:34.123456000000')
assert_equal(np.datetime_as_string(a, unit='fs'),
'2032-07-18T12:23:34.123456000000000')
assert_equal(np.datetime_as_string(a, unit='as'),
'2032-07-18T12:23:34.123456000000000000')
# unit='auto' parameter
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),
'2032-07-18T12:23:34.123456')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),
'2032-07-18T12:23:34.120')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),
'2032-07-18T12:23:34')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),
'2032-07-18T12:23')
# 'auto' doesn't split up hour and minute
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),
'2032-07-18T12:00')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),
'2032-07-18')
# 'auto' doesn't split up the date
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),
'2032-07-01')
assert_equal(np.datetime_as_string(
np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
'2032-01-01')
@pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.")
def test_datetime_as_string_timezone(self):
# timezone='local' vs 'UTC'
a = np.datetime64('2010-03-15T06:30', 'm')
assert_equal(np.datetime_as_string(a),
'2010-03-15T06:30')
assert_equal(np.datetime_as_string(a, timezone='naive'),
'2010-03-15T06:30')
assert_equal(np.datetime_as_string(a, timezone='UTC'),
'2010-03-15T06:30Z')
assert_(np.datetime_as_string(a, timezone='local') !=
'2010-03-15T06:30')
b = np.datetime64('2010-02-15T06:30', 'm')
assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),
'2010-03-15T01:30-0500')
assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),
'2010-03-15T02:30-0400')
assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),
'2010-03-14T23:30-0700')
assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),
'2010-02-15T00:30-0600')
assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),
'2010-02-15T01:30-0500')
assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),
'2010-02-14T22:30-0800')
        # Converting dates to strings with a timezone attached is disabled
        # by default
assert_raises(TypeError, np.datetime_as_string, a, unit='D',
timezone=tz('US/Pacific'))
# Check that we can print out the date in the specified time zone
assert_equal(np.datetime_as_string(a, unit='D',
timezone=tz('US/Pacific'), casting='unsafe'),
'2010-03-14')
assert_equal(np.datetime_as_string(b, unit='D',
timezone=tz('US/Central'), casting='unsafe'),
'2010-02-15')
def test_datetime_arange(self):
# With two datetimes provided as strings
a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.array(['2010-01-05', '2010-01-06', '2010-01-07',
'2010-01-08', '2010-01-09'], dtype='M8[D]'))
a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.array(['1950-02-10', '1950-02-09', '1950-02-08',
'1950-02-07'], dtype='M8[D]'))
# Unit should be detected as months here
a = np.arange('1969-05', '1970-05', 2, dtype='M8')
assert_equal(a.dtype, np.dtype('M8[M]'))
assert_equal(a,
np.datetime64('1969-05') + np.arange(12, step=2))
        # datetime start with an integer or timedelta stop works as well;
        # it produces the range [start, start + stop) in this case
a = np.arange('1969', 18, 3, dtype='M8')
assert_equal(a.dtype, np.dtype('M8[Y]'))
assert_equal(a,
np.datetime64('1969') + np.arange(18, step=3))
a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.datetime64('1969-12-19') + np.arange(22, step=2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.datetime64('today'),
np.datetime64('today') + 3, 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange,
np.datetime64('2012-02-03T14', 's'),
np.timedelta64(5, 'Y'))
def test_datetime_arange_no_dtype(self):
d = np.array('2010-01-04', dtype="M8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_raises(ValueError, np.arange, d)
def test_timedelta_arange(self):
a = np.arange(3, 10, dtype='m8')
assert_equal(a.dtype, np.dtype('m8'))
assert_equal(a, np.timedelta64(0) + np.arange(3, 10))
a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')
assert_equal(a.dtype, np.dtype('m8[s]'))
assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.timedelta64(0),
np.timedelta64(5), 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
@pytest.mark.parametrize("val1, val2, expected", [
# case from gh-12092
(np.timedelta64(7, 's'),
np.timedelta64(3, 's'),
np.timedelta64(1, 's')),
# negative value cases
(np.timedelta64(3, 's'),
np.timedelta64(-2, 's'),
np.timedelta64(-1, 's')),
(np.timedelta64(-3, 's'),
np.timedelta64(2, 's'),
np.timedelta64(1, 's')),
# larger value cases
(np.timedelta64(17, 's'),
np.timedelta64(22, 's'),
np.timedelta64(17, 's')),
(np.timedelta64(22, 's'),
np.timedelta64(17, 's'),
np.timedelta64(5, 's')),
# different units
(np.timedelta64(1, 'm'),
np.timedelta64(57, 's'),
np.timedelta64(3, 's')),
(np.timedelta64(1, 'us'),
np.timedelta64(727, 'ns'),
np.timedelta64(273, 'ns')),
# NaT is propagated
(np.timedelta64('NaT'),
np.timedelta64(50, 'ns'),
np.timedelta64('NaT')),
# Y % M works
(np.timedelta64(2, 'Y'),
np.timedelta64(22, 'M'),
np.timedelta64(2, 'M')),
])
def test_timedelta_modulus(self, val1, val2, expected):
assert_equal(val1 % val2, expected)
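    # Sketch (not collected): the remainder takes the sign of the divisor,
    # matching Python's % convention for plain ints.
    def _sketch_modulus_sign(self):
        assert 3 % -2 == -1 and -3 % 2 == 1
        assert (np.timedelta64(3, 's') % np.timedelta64(-2, 's')
                == np.timedelta64(-1, 's'))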
@pytest.mark.parametrize("val1, val2", [
# years and months sometimes can't be unambiguously
# divided for modulus operation
(np.timedelta64(7, 'Y'),
np.timedelta64(3, 's')),
(np.timedelta64(7, 'M'),
np.timedelta64(1, 'D')),
])
def test_timedelta_modulus_error(self, val1, val2):
with assert_raises_regex(TypeError, "common metadata divisor"):
val1 % val2
def test_timedelta_modulus_div_by_zero(self):
with assert_warns(RuntimeWarning):
actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
assert_equal(actual, np.timedelta64('NaT'))
@pytest.mark.parametrize("val1, val2", [
# cases where one operand is not
# timedelta64
(np.timedelta64(7, 'Y'),
15,),
(7.5,
np.timedelta64(1, 'D')),
])
def test_timedelta_modulus_type_resolution(self, val1, val2):
# NOTE: some of the operations may be supported
# in the future
with assert_raises_regex(TypeError,
"'remainder' cannot use operands with types"):
val1 % val2
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_equal(np.arange(d), np.arange(0, d))
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
assert_equal(np.maximum.reduce(a),
np.datetime64('2010-01-02'))
a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
def test_timedelta_correct_mean(self):
        # test mainly exists because a bug once allowed
        # `timedelta.sum(dtype="f8")` to ignore the dtype request.
a = np.arange(1000, dtype="m8[s]")
assert_array_equal(a.mean(), a.sum() / len(a))
def test_datetime_no_subtract_reducelike(self):
# subtracting two datetime64 works, but we cannot reduce it, since
# the result of that subtraction will have a different dtype.
arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]")
msg = r"the resolved dtypes are not compatible"
with pytest.raises(TypeError, match=msg):
np.subtract.reduce(arr)
with pytest.raises(TypeError, match=msg):
np.subtract.accumulate(arr)
with pytest.raises(TypeError, match=msg):
np.subtract.reduceat(arr, [0])
def test_datetime_busday_offset(self):
# First Monday in June
assert_equal(
np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-06'))
# Last Monday in June
assert_equal(
np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-27'))
assert_equal(
np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-27'))
# Default M-F business days, different roll modes
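        # (2010-08-01 falls on a Sunday, so the backward-style rolls land on
        # Friday 2010-07-30 and the forward-style rolls on Monday 2010-08-02)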
assert_equal(np.busday_offset('2010-08', 0, roll='backward'),
np.datetime64('2010-07-30'))
assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),
np.datetime64('2010-07-30'))
assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='forward'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='following'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),
np.datetime64('2010-11-01'))
assert_equal(
np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),
np.datetime64('2010-10-29'))
assert_equal(
np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),
np.datetime64('2010-10-29'))
assert_equal(
np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),
np.datetime64('2010-10-18'))
assert_equal(
np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),
np.datetime64('2010-10-15'))
# roll='raise' by default
assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)
# Bigger offset values
assert_equal(np.busday_offset('2006-02-01', 25),
np.datetime64('2006-03-08'))
assert_equal(np.busday_offset('2006-03-08', -25),
np.datetime64('2006-02-01'))
assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),
np.datetime64('2007-04-07'))
assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),
np.datetime64('2007-02-25'))
# NaT values when roll is not raise
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),
np.datetime64('NaT'))
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),
np.datetime64('NaT'))
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
np.datetime64('NaT'))
def test_datetime_busdaycalendar(self):
# Check that it removes NaT, duplicates, and weekends
# and sorts the result.
bdd = np.busdaycalendar(
holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',
'2011-12-26', '2011-05-30', '2011-01-17'])
assert_equal(bdd.holidays,
np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))
# Default M-F weekmask
assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))
# Check string weekmask with varying whitespace.
bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri")
assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))
# Check length 7 0/1 string
bdd = np.busdaycalendar(weekmask="0011001")
assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))
        # Check abbreviated weekday-name weekmask.
bdd = np.busdaycalendar(weekmask="Mon Tue")
assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))
# All-zeros weekmask should raise
assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])
# weekday names must be correct case
assert_raises(ValueError, np.busdaycalendar, weekmask="satsun")
        # Empty weekmask string should raise
assert_raises(ValueError, np.busdaycalendar, weekmask="")
# Invalid weekday name codes should raise
assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We")
assert_raises(ValueError, np.busdaycalendar, weekmask="Max")
assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue")
def test_datetime_busday_holidays_offset(self):
# With exactly one holiday
assert_equal(
np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),
np.datetime64('2011-11-18'))
assert_equal(
np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),
np.datetime64('2011-11-10'))
assert_equal(
np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),
np.datetime64('2011-11-10'))
assert_equal(
np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),
np.datetime64('2011-11-04'))
# With the holiday appearing twice
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-11-11', '2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-11-11', '2011-11-11']),
np.datetime64('2011-11-10'))
# With a NaT holiday
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-11-11', 'NaT']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['NaT', '2011-11-11']),
np.datetime64('2011-11-10'))
# With another holiday after
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-11-11', '2011-11-24']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-11-11', '2011-11-24']),
np.datetime64('2011-11-10'))
# With another holiday before
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-10-10', '2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-10-10', '2011-11-11']),
np.datetime64('2011-11-10'))
# With another holiday before and after
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
np.datetime64('2011-11-10'))
# A bigger forward jump across more than one week/holiday
holidays = ['2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21',
'2011-12-26', '2012-01-02']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
assert_equal(
np.busday_offset('2011-10-03', 4, holidays=holidays),
np.busday_offset('2011-10-03', 4))
assert_equal(
np.busday_offset('2011-10-03', 5, holidays=holidays),
np.busday_offset('2011-10-03', 5 + 1))
assert_equal(
np.busday_offset('2011-10-03', 27, holidays=holidays),
np.busday_offset('2011-10-03', 27 + 1))
assert_equal(
np.busday_offset('2011-10-03', 28, holidays=holidays),
np.busday_offset('2011-10-03', 28 + 2))
assert_equal(
np.busday_offset('2011-10-03', 35, holidays=holidays),
np.busday_offset('2011-10-03', 35 + 2))
assert_equal(
np.busday_offset('2011-10-03', 36, holidays=holidays),
np.busday_offset('2011-10-03', 36 + 3))
assert_equal(
np.busday_offset('2011-10-03', 56, holidays=holidays),
np.busday_offset('2011-10-03', 56 + 3))
assert_equal(
np.busday_offset('2011-10-03', 57, holidays=holidays),
np.busday_offset('2011-10-03', 57 + 4))
assert_equal(
np.busday_offset('2011-10-03', 60, holidays=holidays),
np.busday_offset('2011-10-03', 60 + 4))
assert_equal(
np.busday_offset('2011-10-03', 61, holidays=holidays),
np.busday_offset('2011-10-03', 61 + 5))
assert_equal(
np.busday_offset('2011-10-03', 61, busdaycal=bdd),
np.busday_offset('2011-10-03', 61 + 5))
# A bigger backward jump across more than one week/holiday
assert_equal(
np.busday_offset('2012-01-03', -1, holidays=holidays),
np.busday_offset('2012-01-03', -1 - 1))
assert_equal(
np.busday_offset('2012-01-03', -4, holidays=holidays),
np.busday_offset('2012-01-03', -4 - 1))
assert_equal(
np.busday_offset('2012-01-03', -5, holidays=holidays),
np.busday_offset('2012-01-03', -5 - 2))
assert_equal(
np.busday_offset('2012-01-03', -25, holidays=holidays),
np.busday_offset('2012-01-03', -25 - 2))
assert_equal(
np.busday_offset('2012-01-03', -26, holidays=holidays),
np.busday_offset('2012-01-03', -26 - 3))
assert_equal(
np.busday_offset('2012-01-03', -33, holidays=holidays),
np.busday_offset('2012-01-03', -33 - 3))
assert_equal(
np.busday_offset('2012-01-03', -34, holidays=holidays),
np.busday_offset('2012-01-03', -34 - 4))
assert_equal(
np.busday_offset('2012-01-03', -56, holidays=holidays),
np.busday_offset('2012-01-03', -56 - 4))
assert_equal(
np.busday_offset('2012-01-03', -57, holidays=holidays),
np.busday_offset('2012-01-03', -57 - 5))
assert_equal(
np.busday_offset('2012-01-03', -57, busdaycal=bdd),
np.busday_offset('2012-01-03', -57 - 5))
# Can't supply both a weekmask/holidays and busdaycal
assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
weekmask='1111100', busdaycal=bdd)
assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
holidays=holidays, busdaycal=bdd)
# Roll with the holidays
assert_equal(
np.busday_offset('2011-12-25', 0,
roll='forward', holidays=holidays),
np.datetime64('2011-12-27'))
assert_equal(
np.busday_offset('2011-12-26', 0,
roll='forward', holidays=holidays),
np.datetime64('2011-12-27'))
assert_equal(
np.busday_offset('2011-12-26', 0,
roll='backward', holidays=holidays),
np.datetime64('2011-12-23'))
assert_equal(
np.busday_offset('2012-02-27', 0,
roll='modifiedfollowing',
holidays=['2012-02-27', '2012-02-26', '2012-02-28',
'2012-03-01', '2012-02-29']),
np.datetime64('2012-02-24'))
assert_equal(
np.busday_offset('2012-03-06', 0,
roll='modifiedpreceding',
holidays=['2012-03-02', '2012-03-03', '2012-03-01',
'2012-03-05', '2012-03-07', '2012-03-06']),
np.datetime64('2012-03-08'))
def test_datetime_busday_holidays_count(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
# Validate against busday_offset broadcast against
# a range of offsets
dates = np.busday_offset('2011-01-01', np.arange(366),
roll='forward', busdaycal=bdd)
assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
np.arange(366))
# Returns negative value when reversed
assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
-np.arange(366))
dates = np.busday_offset('2011-12-31', -np.arange(366),
roll='forward', busdaycal=bdd)
assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
np.arange(366))
# Returns negative value when reversed
assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
-np.arange(366))
# Can't supply both a weekmask/holidays and busdaycal
assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
weekmask='1111100', busdaycal=bdd)
assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
holidays=holidays, busdaycal=bdd)
# Number of Mondays in March 2011
assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)
# Returns negative value when reversed
assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)
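    # Sketch (not collected): busday_count counts over the half-open
    # interval [begindates, enddates), which is why swapping the arguments
    # above flips the sign.
    def _sketch_count_half_open(self):
        # 2011-03-07 and 2011-03-14 are both Mondays; only the first counts.
        assert np.busday_count('2011-03-07', '2011-03-14',
                               weekmask='Mon') == 1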
def test_datetime_is_busday(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',
'NaT']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
# Weekend/weekday tests
assert_equal(np.is_busday('2011-01-01'), False)
assert_equal(np.is_busday('2011-01-02'), False)
assert_equal(np.is_busday('2011-01-03'), True)
        # None of the holidays should be business days
assert_equal(np.is_busday(holidays, busdaycal=bdd),
np.zeros(len(holidays), dtype='?'))
def test_datetime_y2038(self):
# Test parsing on either side of the Y2038 boundary
a = np.datetime64('2038-01-19T03:14:07')
assert_equal(a.view(np.int64), 2**31 - 1)
a = np.datetime64('2038-01-19T03:14:08')
assert_equal(a.view(np.int64), 2**31)
# Test parsing on either side of the Y2038 boundary with
# a manually specified timezone offset
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:07+0100')
assert_equal(a.view(np.int64), 2**31 - 1)
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:08+0100')
assert_equal(a.view(np.int64), 2**31)
# Test parsing a date after Y2038
a = np.datetime64('2038-01-20T13:21:14')
assert_equal(str(a), '2038-01-20T13:21:14')
def test_isnat(self):
assert_(np.isnat(np.datetime64('NaT', 'ms')))
assert_(np.isnat(np.datetime64('NaT', 'ns')))
assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))
assert_(np.isnat(np.timedelta64('NaT', "ms")))
assert_(not np.isnat(np.timedelta64(34, "ms")))
res = np.array([False, False, True])
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
def test_isnat_error(self):
# Test that only datetime dtype arrays are accepted
for t in np.typecodes["All"]:
if t in np.typecodes["Datetime"]:
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
@pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
'us', 'ns', 'ps', 'fs', 'as'])
@pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
'<timedelta64[%s]', '>timedelta64[%s]'])
def test_isfinite_isinf_isnan_units(self, unit, dstr):
        '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes'''
arr_val = [123, -321, "NaT"]
        arr = np.array(arr_val, dtype=dstr % unit)
pos = np.array([True, True, False])
neg = np.array([False, False, True])
false = np.array([False, False, False])
assert_equal(np.isfinite(arr), pos)
assert_equal(np.isinf(arr), false)
assert_equal(np.isnan(arr), neg)
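    # Sketch: NaT is the only non-finite datetime/timedelta value, so
    # isfinite and isnan partition every element while isinf stays False.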
def test_assert_equal(self):
assert_raises(AssertionError, assert_equal,
np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
# construct a co-recursive list
a, b = [], []
a.append(b)
b.append(a)
obj_arr = np.array([None])
obj_arr[0] = a
# At some point this caused a stack overflow (gh-11154). Now raises
# ValueError since the nested list cannot be converted to a datetime.
assert_raises(ValueError, obj_arr.astype, 'M8')
assert_raises(ValueError, obj_arr.astype, 'm8')
@pytest.mark.parametrize("shape", [(), (1,)])
def test_discovery_from_object_array(self, shape):
arr = np.array("2020-10-10", dtype=object).reshape(shape)
res = np.array("2020-10-10", dtype="M8").reshape(shape)
assert res.dtype == np.dtype("M8[D]")
assert_equal(arr.astype("M8"), res)
arr[...] = np.bytes_("2020-10-10") # try a numpy string type
assert_equal(arr.astype("M8"), res)
arr = arr.astype("S")
assert_equal(arr.astype("S").astype("M8"), res)
@pytest.mark.parametrize("time_unit", [
"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
# compound units
"10D", "2M",
])
def test_limit_symmetry(self, time_unit):
"""
Dates should have symmetric limits around the unix epoch at +/-np.int64
"""
epoch = np.datetime64(0, time_unit)
latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)
# above should not have overflowed
assert earliest < epoch < latest
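    # Sketch (not collected; assumes int64 min is the NaT sentinel, which is
    # what leaves +/-(2**63 - 1) available for real dates):
    def _sketch_nat_sentinel(self):
        assert np.isnat(np.datetime64(np.iinfo(np.int64).min, 'D'))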
@pytest.mark.parametrize("time_unit", [
"Y", "M",
pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
"D", "h", "m",
"s", "ms", "us", "ns", "ps", "fs", "as",
pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
])
@pytest.mark.parametrize("sign", [-1, 1])
def test_limit_str_roundtrip(self, time_unit, sign):
"""
Limits should roundtrip when converted to strings.
This tests the conversion to and from npy_datetimestruct.
"""
# TODO: add absolute (gold standard) time span limit strings
limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)
# Convert to string and back. Explicit unit needed since the day and
# week reprs are not distinguishable.
limit_via_str = np.datetime64(str(limit), time_unit)
assert limit_via_str == limit
class TestDateTimeData:
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
assert_equal(np.datetime_data(a.dtype), ('D', 1))
def test_bytes(self):
# byte units are converted to unicode
dt = np.datetime64('2000', (b'ms', 5))
assert np.datetime_data(dt.dtype) == ('ms', 5)
dt = np.datetime64('2000', b'5ms')
assert np.datetime_data(dt.dtype) == ('ms', 5)
def test_non_ascii(self):
        # μs is normalized to us
dt = np.datetime64('2000', ('μs', 5))
assert np.datetime_data(dt.dtype) == ('us', 5)
dt = np.datetime64('2000', '5μs')
assert np.datetime_data(dt.dtype) == ('us', 5)
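    # Sketch (not collected): datetime_data round-trips through dtype
    # construction, e.g. a 5-microsecond unit reports as ('us', 5).
    def _sketch_roundtrip(self):
        assert np.datetime_data(np.dtype('M8[5us]')) == ('us', 5)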
| 114,951 | Python | 44.453539 | 101 | 0.526511 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_dlpack.py | import sys
import pytest
import numpy as np
from numpy.testing import assert_array_equal, IS_PYPY
class TestDLPack:
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_dunder_dlpack_refcount(self):
x = np.arange(5)
y = x.__dlpack__()
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
def test_dunder_dlpack_stream(self):
x = np.arange(5)
x.__dlpack__(stream=None)
with pytest.raises(RuntimeError):
x.__dlpack__(stream=1)
def test_strides_not_multiple_of_itemsize(self):
dt = np.dtype([('int', np.int32), ('char', np.int8)])
y = np.zeros((5,), dtype=dt)
z = y['int']
with pytest.raises(RuntimeError):
np.from_dlpack(z)
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_from_dlpack_refcount(self):
x = np.arange(5)
y = np.from_dlpack(x)
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
@pytest.mark.parametrize("dtype", [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
np.complex64, np.complex128
])
def test_dtype_passthrough(self, dtype):
x = np.arange(5, dtype=dtype)
y = np.from_dlpack(x)
assert y.dtype == x.dtype
assert_array_equal(x, y)
def test_invalid_dtype(self):
x = np.asarray(np.datetime64('2021-05-27'))
with pytest.raises(TypeError):
np.from_dlpack(x)
def test_invalid_byte_swapping(self):
dt = np.dtype('=i8').newbyteorder()
x = np.arange(5, dtype=dt)
with pytest.raises(TypeError):
np.from_dlpack(x)
def test_non_contiguous(self):
x = np.arange(25).reshape((5, 5))
y1 = x[0]
assert_array_equal(y1, np.from_dlpack(y1))
y2 = x[:, 0]
assert_array_equal(y2, np.from_dlpack(y2))
y3 = x[1, :]
assert_array_equal(y3, np.from_dlpack(y3))
y4 = x[1]
assert_array_equal(y4, np.from_dlpack(y4))
y5 = np.diagonal(x).copy()
assert_array_equal(y5, np.from_dlpack(y5))
@pytest.mark.parametrize("ndim", range(33))
def test_higher_dims(self, ndim):
shape = (1,) * ndim
x = np.zeros(shape, dtype=np.float64)
assert shape == np.from_dlpack(x).shape
def test_dlpack_device(self):
x = np.arange(5)
assert x.__dlpack_device__() == (1, 0)
y = np.from_dlpack(x)
assert y.__dlpack_device__() == (1, 0)
z = y[::2]
assert z.__dlpack_device__() == (1, 0)
def dlpack_deleter_exception(self):
x = np.arange(5)
_ = x.__dlpack__()
raise RuntimeError
def test_dlpack_destructor_exception(self):
with pytest.raises(RuntimeError):
self.dlpack_deleter_exception()
def test_readonly(self):
x = np.arange(5)
x.flags.writeable = False
with pytest.raises(TypeError):
x.__dlpack__()
def test_ndim0(self):
x = np.array(1.0)
y = np.from_dlpack(x)
assert_array_equal(x, y)
def test_size1dims_arrays(self):
x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
buffer=np.ones(1000, dtype=np.uint8), order='F')
y = np.from_dlpack(x)
assert_array_equal(x, y)
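    # Sketch (not collected): from_dlpack shares memory with the exporter,
    # so writes through the result are visible in the source array.
    def _sketch_zero_copy(self):
        x = np.arange(5)
        y = np.from_dlpack(x)
        y[0] = 99
        assert x[0] == 99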
| 3,502 | Python | 27.25 | 72 | 0.555682 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_machar.py | """
Test machar. Given recent changes to hardcode type data, we might want to get
rid of both MachAr and this test at some point.
"""
import pytest
from numpy.core._machar import MachAr
import numpy.core.numerictypes as ntypes
from numpy import errstate, array
class TestMachAr:
def _run_machar_highprec(self):
# Instantiate MachAr instance with high enough precision to cause
# underflow
try:
hiprec = ntypes.float96
MachAr(lambda v: array(v, hiprec))
except AttributeError:
            # No ntypes.float96 on this platform; skip instead of silently
            # passing via a no-op string expression.
            pytest.skip(
                "no ntypes.float96 available on this platform")
    def test_underflow(self):
        # Regression test for #759:
        # instantiating MachAr for dtype=np.float96 raised a spurious warning.
with errstate(all='raise'):
try:
self._run_machar_highprec()
except FloatingPointError as e:
msg = "Caught %s exception, should not have been raised." % e
raise AssertionError(msg)
| 1,067 | Python | 33.451612 | 78 | 0.637301 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_cpu_features.py | import sys, platform, re, pytest
from numpy.core._multiarray_umath import __cpu_features__
def assert_features_equal(actual, desired, fname):
__tracebackhide__ = True # Hide traceback for py.test
actual, desired = str(actual), str(desired)
if actual == desired:
return
detected = str(__cpu_features__).replace("'", "")
try:
with open("/proc/cpuinfo", "r") as fd:
cpuinfo = fd.read(2048)
except Exception as err:
cpuinfo = str(err)
try:
import subprocess
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
auxv = auxv.decode()
except Exception as err:
auxv = str(err)
import textwrap
error_report = textwrap.indent(
"""
###########################################
### Extra debugging information
###########################################
-------------------------------------------
--- NumPy Detections
-------------------------------------------
%s
-------------------------------------------
--- SYS / CPUINFO
-------------------------------------------
%s....
-------------------------------------------
--- SYS / AUXV
-------------------------------------------
%s
""" % (detected, cpuinfo, auxv), prefix='\r')
raise AssertionError((
"Failure Detection\n"
" NAME: '%s'\n"
" ACTUAL: %s\n"
" DESIRED: %s\n"
"%s"
) % (fname, actual, desired, error_report))
class AbstractTest:
features = []
features_groups = {}
features_map = {}
features_flags = set()
def load_flags(self):
# a hook
pass
def test_features(self):
self.load_flags()
for gname, features in self.features_groups.items():
test_features = [self.cpu_have(f) for f in features]
assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
for feature_name in self.features:
cpu_have = self.cpu_have(feature_name)
npy_have = __cpu_features__.get(feature_name)
assert_features_equal(npy_have, cpu_have, feature_name)
def cpu_have(self, feature_name):
map_names = self.features_map.get(feature_name, feature_name)
if isinstance(map_names, str):
return map_names in self.features_flags
for f in map_names:
if f in self.features_flags:
return True
return False
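    # (a features_map entry may alias one NumPy feature name to several
    # kernel flag spellings; any single match above counts as support)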
def load_flags_cpuinfo(self, magic_key):
self.features_flags = self.get_cpuinfo_item(magic_key)
def get_cpuinfo_item(self, magic_key):
values = set()
with open('/proc/cpuinfo') as fd:
for line in fd:
if not line.startswith(magic_key):
continue
flags_value = [s.strip() for s in line.split(':', 1)]
if len(flags_value) == 2:
values = values.union(flags_value[1].upper().split())
return values
def load_flags_auxv(self):
import subprocess
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
for at in auxv.split(b'\n'):
if not at.startswith(b"AT_HWCAP"):
continue
hwcap_value = [s.strip() for s in at.split(b':', 1)]
if len(hwcap_value) == 2:
self.features_flags = self.features_flags.union(
hwcap_value[1].upper().decode().split()
)
is_linux = sys.platform.startswith('linux')
is_cygwin = sys.platform.startswith('cygwin')
machine = platform.machine()
is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
@pytest.mark.skipif(
not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
)
class Test_X86_Features(AbstractTest):
features = [
"MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
"AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
"AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
"AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
"AVX512VBMI", "AVX512VBMI2", "AVX512BITALG",
]
features_groups = dict(
AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
"AVX5124VNNIW", "AVX512VPOPCNTDQ"],
AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
"AVX512VBMI"],
AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
"AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
)
features_map = dict(
SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
)
def load_flags(self):
self.load_flags_cpuinfo("flags")
is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
class Test_POWER_Features(AbstractTest):
features = ["VSX", "VSX2", "VSX3", "VSX4"]
features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")
def load_flags(self):
self.load_flags_auxv()
is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_zarch,
reason="Only for Linux and IBM Z")
class Test_ZARCH_Features(AbstractTest):
features = ["VX", "VXE", "VXE2"]
def load_flags(self):
self.load_flags_auxv()
is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
class Test_ARM_Features(AbstractTest):
features = [
"NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
]
features_groups = dict(
NEON_FP16 = ["NEON", "HALF"],
NEON_VFPV4 = ["NEON", "VFPV4"],
)
def load_flags(self):
self.load_flags_cpuinfo("Features")
arch = self.get_cpuinfo_item("CPU architecture")
            # handle an aarch64 kernel serving a mounted 32-bit (armv7) rootfs
is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
self.features_map = dict(
NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
)
else:
self.features_map = dict(
# ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32)
# doesn't provide information about ASIMD, so we assume that ASIMD is supported
# if the kernel reports any one of the following ARM8 features.
ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
)
| 7,169 | Python | 37.548387 | 103 | 0.556563 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_ufunc.py | import warnings
import itertools
import sys
import pytest
import numpy as np
import numpy.core._umath_tests as umt
import numpy.linalg._umath_linalg as uml
import numpy.core._operand_flag_tests as opflag_tests
import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
assert_allclose, HAS_REFCOUNT, suppress_warnings
)
from numpy.testing._private.utils import requires_memory
from numpy.compat import pickle
UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values()
if isinstance(obj, np.ufunc)]
UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
class TestUfuncKwargs:
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
assert_raises(TypeError, np.add, 1, 2, dtypex=int)
assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
assert_raises(TypeError, np.add, 1, 2, outx=None)
assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
assert_raises(TypeError, np.add, 1, 2, subokx=False)
assert_raises(TypeError, np.add, 1, 2, wherex=[True])
def test_sig_signature(self):
assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
signature='ii->i')
def test_sig_dtype(self):
assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
dtype=int)
assert_raises(TypeError, np.add, 1, 2, signature='ii->i',
dtype=int)
def test_extobj_refcount(self):
# Should not segfault with USE_DEBUG.
assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
class TestUfuncGenericLoops:
"""Test generic loops.
The loops to be tested are:
PyUFunc_ff_f_As_dd_d
PyUFunc_ff_f
PyUFunc_dd_d
PyUFunc_gg_g
PyUFunc_FF_F_As_DD_D
PyUFunc_DD_D
PyUFunc_FF_F
PyUFunc_GG_G
PyUFunc_OO_O
PyUFunc_OO_O_method
PyUFunc_f_f_As_d_d
PyUFunc_d_d
PyUFunc_f_f
PyUFunc_g_g
PyUFunc_F_F_As_D_D
PyUFunc_F_F
PyUFunc_D_D
PyUFunc_G_G
PyUFunc_O_O
PyUFunc_O_O_method
PyUFunc_On_Om
Where:
f -- float
d -- double
g -- long double
F -- complex float
D -- complex double
G -- complex long double
O -- python object
It is difficult to assure that each of these loops is entered from the
Python level as the special cased loops are a moving target and the
corresponding types are architecture dependent. We probably need to
define C level testing ufuncs to get at them. For the time being, I've
just looked at the signatures registered in the build directory to find
relevant functions.
"""
np_dtypes = [
(np.single, np.single), (np.single, np.double),
(np.csingle, np.csingle), (np.csingle, np.cdouble),
(np.double, np.double), (np.longdouble, np.longdouble),
(np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)]
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
def f2(x, y):
return x**y
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs, xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
# class to use in testing object method loops
class foo:
def conjugate(self):
return np.bool_(1)
def logical_xor(self, obj):
return np.bool_(1)
def test_unary_PyUFunc_O_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.abs(x) == 1))
def test_unary_PyUFunc_O_O_method_simple(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.conjugate(x) == True))
def test_binary_PyUFunc_OO_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.add(x, x) == 2))
def test_binary_PyUFunc_OO_O_method(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
def test_binary_PyUFunc_On_Om_method(self, foo=foo):
x = np.full((10, 2, 3), foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
def test_python_complex_conjugate(self):
# The conjugate ufunc should fall back to calling the method:
arr = np.array([1+2j, 3-4j], dtype="O")
assert isinstance(arr[0], complex)
res = np.conjugate(arr)
assert res.dtype == np.dtype("O")
assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O"))
@pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
def test_unary_PyUFunc_O_O_method_full(self, ufunc):
"""Compare the result of the object loop with non-object one"""
val = np.float64(np.pi/4)
class MyFloat(np.float64):
def __getattr__(self, attr):
try:
return super().__getattr__(attr)
except AttributeError:
return lambda: getattr(np.core.umath, attr)(val)
# Use 0-D arrays, to ensure the same element call
num_arr = np.array(val, dtype=np.float64)
obj_arr = np.array(MyFloat(val), dtype="O")
with np.errstate(all="raise"):
try:
res_num = ufunc(num_arr)
except Exception as exc:
with assert_raises(type(exc)):
ufunc(obj_arr)
else:
res_obj = ufunc(obj_arr)
assert_array_almost_equal(res_num.astype("O"), res_obj)
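    # Sketch (not collected): the O_O_method loop dispatches to the method
    # of the same name, so any object exposing it participates in the ufunc.
    def _sketch_object_method_dispatch(self):
        class Conjugable:
            def conjugate(self):
                return 42
        arr = np.array([Conjugable()], dtype=object)
        assert np.conjugate(arr)[0] == 42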
def _pickleable_module_global():
pass
class TestUfunc:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(pickle.dumps(np.sin,
protocol=proto)) is np.sin)
# Check that ufunc not defined in the top level numpy namespace
# such as numpy.core._rational_tests.test_add can also be pickled
res = pickle.loads(pickle.dumps(_rational_tests.test_add,
protocol=proto))
assert_(res is _rational_tests.test_add)
def test_pickle_withstring(self):
astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert_(pickle.loads(astring) is np.cos)
def test_pickle_name_is_qualname(self):
# This tests that a simplification of our ufunc pickle code will
        # lead to allowing qualnames as names. Future ufuncs could possibly
        # add a specific qualname, or a hook into pickling instead
        # (dask+numba may benefit).
_pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc
obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc))
assert obj is umt._pickleable_module_global_ufunc
def test_reduceat_shifting_sum(self):
L = 6
x = np.arange(L)
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
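        # (worked example: idx is [0, 2, 1, 3, 2, 4, 3, 5], so the even
        # result slots hold sums over [0:2), [1:3), [2:4), [3:5) -> 1, 3, 5, 7)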
def test_all_ufunc(self):
"""Try to check presence and results of all ufuncs.
The list of ufuncs comes from generate_umath.py and is as follows:
===== ==== ============= =============== ========================
done args function types notes
===== ==== ============= =============== ========================
n 1 conjugate nums + O
n 1 absolute nums + O complex -> real
n 1 negative nums + O
n 1 sign nums + O -> int
n 1 invert bool + ints + O flts raise an error
n 1 degrees real + M cmplx raise an error
n 1 radians real + M cmplx raise an error
n 1 arccos flts + M
n 1 arccosh flts + M
n 1 arcsin flts + M
n 1 arcsinh flts + M
n 1 arctan flts + M
n 1 arctanh flts + M
n 1 cos flts + M
n 1 sin flts + M
n 1 tan flts + M
n 1 cosh flts + M
n 1 sinh flts + M
n 1 tanh flts + M
n 1 exp flts + M
n 1 expm1 flts + M
n 1 log flts + M
n 1 log10 flts + M
n 1 log1p flts + M
n 1 sqrt flts + M real x < 0 raises error
n 1 ceil real + M
n 1 trunc real + M
n 1 floor real + M
n 1 fabs real + M
n 1 rint flts + M
n 1 isnan flts -> bool
n 1 isinf flts -> bool
n 1 isfinite flts -> bool
n 1 signbit real -> bool
n 1 modf real -> (frac, int)
n 1 logical_not bool + nums + M -> bool
n 2 left_shift ints + O flts raise an error
n 2 right_shift ints + O flts raise an error
n 2 add bool + nums + O boolean + is ||
n 2 subtract bool + nums + O boolean - is ^
n 2 multiply bool + nums + O boolean * is &
n 2 divide nums + O
n 2 floor_divide nums + O
n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d
n 2 fmod nums + M
n 2 power nums + O
n 2 greater bool + nums + O -> bool
n 2 greater_equal bool + nums + O -> bool
n 2 less bool + nums + O -> bool
n 2 less_equal bool + nums + O -> bool
n 2 equal bool + nums + O -> bool
n 2 not_equal bool + nums + O -> bool
n 2 logical_and bool + nums + M -> bool
n 2 logical_or bool + nums + M -> bool
n 2 logical_xor bool + nums + M -> bool
n 2 maximum bool + nums + O
n 2 minimum bool + nums + O
n 2 bitwise_and bool + ints + O flts raise an error
n 2 bitwise_or bool + ints + O flts raise an error
n 2 bitwise_xor bool + ints + O flts raise an error
n 2 arctan2 real + M
n 2 remainder ints + real + O
n 2 hypot real + M
===== ==== ============= =============== ========================
Types other than those listed will be accepted, but they are cast to
the smallest compatible type for which the function is defined. The
casting rules are:
bool -> int8 -> float32
ints -> double
"""
pass
# from include/numpy/ufuncobject.h
size_inferred = 2
can_ignore = 4
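# These values mirror the UFUNC_CORE_DIM_SIZE_INFERRED and
# UFUNC_CORE_DIM_CAN_IGNORE core-dimension flags.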
def test_signature0(self):
# the arguments to test_signature are: nin, nout, core_signature
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(i),(i)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 1, 0))
assert_equal(ixs, (0, 0))
assert_equal(flags, (self.size_inferred,))
assert_equal(sizes, (-1,))
def test_signature1(self):
# empty core signature; treat as plain ufunc (with trivial core)
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(),()->()")
assert_equal(enabled, 0)
assert_equal(num_dims, (0, 0, 0))
assert_equal(ixs, ())
assert_equal(flags, ())
assert_equal(sizes, ())
def test_signature2(self):
# more complicated names for variables
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(i1,i2),(J_1)->(_kAB)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 1, 1))
assert_equal(ixs, (0, 1, 2, 3))
assert_equal(flags, (self.size_inferred,)*4)
assert_equal(sizes, (-1, -1, -1, -1))
def test_signature3(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, u"(i1, i12), (J_1)->(i12, i2)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 1, 2))
assert_equal(ixs, (0, 1, 2, 1, 3))
assert_equal(flags, (self.size_inferred,)*4)
assert_equal(sizes, (-1, -1, -1, -1))
def test_signature4(self):
# matrix_multiply signature from _umath_tests
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(n,k),(k,m)->(n,m)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 2, 2))
assert_equal(ixs, (0, 1, 1, 2, 0, 2))
assert_equal(flags, (self.size_inferred,)*3)
assert_equal(sizes, (-1, -1, -1))
def test_signature5(self):
# matmul signature from _umath_tests
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(n?,k),(k,m?)->(n?,m?)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 2, 2))
assert_equal(ixs, (0, 1, 1, 2, 0, 2))
assert_equal(flags, (self.size_inferred | self.can_ignore,
self.size_inferred,
self.size_inferred | self.can_ignore))
assert_equal(sizes, (-1, -1, -1))
def test_signature6(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
1, 1, "(3)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 0))
assert_equal(ixs, (0,))
assert_equal(flags, (0,))
assert_equal(sizes, (3,))
def test_signature7(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "(3),(03,3),(n)->(9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (0, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature8(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "(3?),(3?,3?),(n)->(9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature9(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
1, 1, "( 3) -> ( )")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 0))
assert_equal(ixs, (0,))
assert_equal(flags, (0,))
assert_equal(sizes, (3,))
def test_signature10(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "( 3? ) , (3? , 3?) ,(n )-> ( 9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature_failure_extra_parenthesis(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "((i)),(i)->()")
def test_signature_failure_mismatching_parenthesis(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "(i),)i(->()")
def test_signature_failure_signature_missing_input_arg(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "(i),->()")
def test_signature_failure_signature_missing_output_arg(self):
with assert_raises(ValueError):
umt.test_signature(2, 2, "(i),(i)->()")
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
def test_forced_sig(self):
a = 0.5*np.arange(3, dtype='f8')
assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
with pytest.warns(DeprecationWarning):
assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
with pytest.warns(DeprecationWarning):
assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'),
[0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
casting='unsafe'), [0, 0, 1])
b = np.zeros((3,), dtype='f8')
np.add(a, 0.5, out=b)
assert_equal(b, [0.5, 1, 1.5])
b[:] = 0
with pytest.warns(DeprecationWarning):
np.add(a, 0.5, sig='i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
with pytest.warns(DeprecationWarning):
np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
def test_signature_all_None(self):
# A signature of all None is an acceptable alternative (since 1.21)
# to not providing a signature at all.
res1 = np.add([3], [4], sig=(None, None, None))
res2 = np.add([3], [4])
assert_array_equal(res1, res2)
res1 = np.maximum([3], [4], sig=(None, None, None))
res2 = np.maximum([3], [4])
assert_array_equal(res1, res2)
with pytest.raises(TypeError):
# a special case that would be deprecated anyway, so it errors:
np.add(3, 4, signature=(None,))
def test_signature_dtype_type(self):
# Since this will be the normal behaviour (past NumPy 1.21),
# the dtype types are already supported:
float_dtype = type(np.dtype(np.float64))
np.add(3, 4, signature=(float_dtype, float_dtype, None))
@pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"])
def test_partial_signature_mismatch(self, casting):
# If the second argument matches already, no need to specify it:
res = np.ldexp(np.float32(1.), np.int_(2), dtype="d")
assert res.dtype == "d"
res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d"))
assert res.dtype == "d"
# ldexp only has a loop for a long second argument; overriding
# the output cannot help with that (no matter the casting)
with pytest.raises(TypeError):
np.ldexp(1., np.uint64(3), dtype="d")
with pytest.raises(TypeError):
np.ldexp(1., np.uint64(3), signature=(None, None, "d"))
def test_partial_signature_mismatch_with_cache(self):
with pytest.raises(TypeError):
np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
# Ensure e,d->None is in the dispatching cache (double loop)
np.add(np.float16(1), np.float64(2))
# The error must still be raised:
with pytest.raises(TypeError):
np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
def test_use_output_signature_for_all_arguments(self):
# Test that providing only `dtype=` or `signature=(None, None, dtype)`
# is sufficient if falling back to a homogeneous signature works.
# In this case, the `intp, intp -> intp` loop is chosen.
res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe")
assert res == 1 # the cast happens first.
res = np.power(1.5, 2.8, signature=(None, None, np.intp),
casting="unsafe")
assert res == 1
with pytest.raises(TypeError):
# the unsafe casting would normally cause errors though:
np.power(1.5, 2.8, dtype=np.intp)
def test_signature_errors(self):
with pytest.raises(TypeError,
match="the signature object to ufunc must be a string or"):
np.add(3, 4, signature=123.) # neither a string nor a tuple
with pytest.raises(ValueError):
# bad symbols that do not translate to dtypes
np.add(3, 4, signature="%^->#")
with pytest.raises(ValueError):
np.add(3, 4, signature=b"ii-i") # incomplete and byte string
with pytest.raises(ValueError):
np.add(3, 4, signature="ii>i") # incomplete string
with pytest.raises(ValueError):
np.add(3, 4, signature=(None, "f8")) # bad length
with pytest.raises(UnicodeDecodeError):
np.add(3, 4, signature=b"\xff\xff->i")
def test_forced_dtype_times(self):
# Signatures only set the type numbers (not the actual loop dtypes)
# so using `M` in a signature/dtype should generally work:
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]')
np.maximum(a, a, dtype="M")
np.maximum.reduce(a, dtype="M")
arr = np.arange(10, dtype="m8[s]")
np.add(arr, arr, dtype="m")
np.maximum(arr, arr, dtype="m")
@pytest.mark.parametrize("ufunc", [np.add, np.sqrt])
def test_cast_safety(self, ufunc):
"""Basic test for the safest casts, because ufuncs inner loops can
indicate a cast-safety as well (which is normally always "no").
"""
def call_ufunc(arr, **kwargs):
return ufunc(*(arr,) * ufunc.nin, **kwargs)
arr = np.array([1., 2., 3.], dtype=np.float32)
arr_bs = arr.astype(arr.dtype.newbyteorder())
expected = call_ufunc(arr)
# Normally, a "no" cast:
res = call_ufunc(arr, casting="no")
assert_array_equal(expected, res)
# Byte-swapping is not allowed with "no" though:
with pytest.raises(TypeError):
call_ufunc(arr_bs, casting="no")
# But is allowed with "equiv":
res = call_ufunc(arr_bs, casting="equiv")
assert_array_equal(expected, res)
# Casting to float64 is safe, but not equiv:
with pytest.raises(TypeError):
call_ufunc(arr_bs, dtype=np.float64, casting="equiv")
# but it is safe cast:
res = call_ufunc(arr_bs, dtype=np.float64, casting="safe")
expected = call_ufunc(arr.astype(np.float64)) # upcast
assert_array_equal(expected, res)
def test_true_divide(self):
a = np.array(10)
b = np.array(20)
tgt = np.array(0.5)
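# Typecodes: signed ints (bhilq), unsigned ints (BHILQ), floats
# (e=half, f=single, d=double, g=longdouble) and complex (FDG).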
for tc in 'bhilqBHILQefdgFDG':
dt = np.dtype(tc)
aa = a.astype(dt)
bb = b.astype(dt)
# Check result value and dtype.
for x, y in itertools.product([aa, -aa], [bb, -bb]):
# Check with no output type specified
if tc in 'FDG':
tgt = complex(x)/complex(y)
else:
tgt = float(x)/float(y)
res = np.true_divide(x, y)
rtol = max(np.finfo(res).resolution, 1e-15)
assert_allclose(res, tgt, rtol=rtol)
if tc in 'bhilqBHILQ':
assert_(res.dtype.name == 'float64')
else:
assert_(res.dtype.name == dt.name)
# Check with output type specified. This also checks for the
# incorrect casts in issue gh-3484 because the unary '-' does
# not change types, even for unsigned types. Hence casts in the
# ufunc from signed to unsigned and vice versa will lead to
# errors in the values.
for tcout in 'bhilqBHILQ':
dtout = np.dtype(tcout)
assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
for tcout in 'efdg':
dtout = np.dtype(tcout)
if tc in 'FDG':
# Casting complex to float is not allowed
assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
else:
tgt = float(x)/float(y)
rtol = max(np.finfo(dtout).resolution, 1e-15)
# The value of tiny for the double-double format (some longdoubles) is NaN
with suppress_warnings() as sup:
sup.filter(UserWarning)
if not np.isnan(np.finfo(dtout).tiny):
atol = max(np.finfo(dtout).tiny, 3e-308)
else:
atol = 3e-308
# Some test values trigger the 'invalid' FP error for float16.
with np.errstate(invalid='ignore'):
res = np.true_divide(x, y, dtype=dtout)
if not np.isfinite(res) and tcout == 'e':
continue
assert_allclose(res, tgt, rtol=rtol, atol=atol)
assert_(res.dtype.name == dtout.name)
for tcout in 'FDG':
dtout = np.dtype(tcout)
tgt = complex(x)/complex(y)
rtol = max(np.finfo(dtout).resolution, 1e-15)
# The value of tiny for the double-double format (some longdoubles) is NaN
with suppress_warnings() as sup:
sup.filter(UserWarning)
if not np.isnan(np.finfo(dtout).tiny):
atol = max(np.finfo(dtout).tiny, 3e-308)
else:
atol = 3e-308
res = np.true_divide(x, y, dtype=dtout)
if not np.isfinite(res):
continue
assert_allclose(res, tgt, rtol=rtol, atol=atol)
assert_(res.dtype.name == dtout.name)
# Check booleans
a = np.ones((), dtype=np.bool_)
res = np.true_divide(a, a)
assert_(res == 1.0)
assert_(res.dtype.name == 'float64')
res = np.true_divide(~a, a)
assert_(res == 0.0)
assert_(res.dtype.name == 'float64')
def test_sum_stability(self):
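# np.sum of float arrays uses pairwise summation (for contiguous
# reductions), keeping the error far below a naive running sum.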
a = np.ones(500, dtype=np.float32)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
a = np.ones(500, dtype=np.float64)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
def test_sum(self):
for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2)
d = np.arange(1, v + 1, dtype=dt)
# warning if sum overflows, which it does in float16
overflow = not np.isfinite(tgt)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_almost_equal(np.sum(d), tgt)
assert_equal(len(w), 1 * overflow)
assert_almost_equal(np.sum(d[::-1]), tgt)
assert_equal(len(w), 2 * overflow)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
assert_almost_equal(np.sum(d[1::2]), 250.)
assert_almost_equal(np.sum(d[::3]), 167.)
assert_almost_equal(np.sum(d[1::3]), 167.)
assert_almost_equal(np.sum(d[::-2]), 250.)
assert_almost_equal(np.sum(d[-1::-2]), 250.)
assert_almost_equal(np.sum(d[::-3]), 167.)
assert_almost_equal(np.sum(d[-1::-3]), 167.)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt)
d += d
assert_almost_equal(d, 2.)
def test_sum_complex(self):
for dt in (np.complex64, np.complex128, np.clongdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j)
d = np.empty(v, dtype=dt)
d.real = np.arange(1, v + 1)
d.imag = -np.arange(1, v + 1)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt) + 1j
assert_almost_equal(np.sum(d[::2]), 250. + 250j)
assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
assert_almost_equal(np.sum(d[::3]), 167. + 167j)
assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt) + 1j
d += d
assert_almost_equal(d, 2. + 2j)
def test_sum_initial(self):
# Integer, single axis
assert_equal(np.sum([3], initial=2), 5)
# Floating point
assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
# Multiple non-adjacent axes
assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
[12, 12, 12])
def test_sum_where(self):
# More extensive tests done in test_reduction_with_where.
assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.)
assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5.,
where=[True, False]), [9., 5.])
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
a = np.arange(6)
assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
def test_broadcast(self):
msg = "broadcast"
a = np.arange(4).reshape((2, 1, 2))
b = np.arange(4).reshape((1, 2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast loop dimensions"
b = np.arange(4).reshape((2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
# Broadcast in core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.arange(4).reshape((4, 1))
assert_raises(ValueError, umt.inner1d, a, b)
# Extend core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.array(7)
assert_raises(ValueError, umt.inner1d, a, b)
# Broadcast should fail
a = np.arange(2).reshape((2, 1, 1))
b = np.arange(3).reshape((3, 1, 1))
assert_raises(ValueError, umt.inner1d, a, b)
# Writing to a broadcasted array with overlap should warn, gh-2705
a = np.arange(2)
b = np.arange(4).reshape((2, 2))
u, v = np.broadcast_arrays(a, b)
assert_equal(u.strides[0], 0)
x = u + v
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
u += v
assert_equal(len(w), 1)
assert_(x[0, 0] != u[0, 0])
# Output reduction should not be allowed.
# See gh-15139
a = np.arange(6).reshape(3, 2)
b = np.ones(2)
out = np.empty(())
assert_raises(ValueError, umt.inner1d, a, b, out)
out2 = np.empty(3)
c = umt.inner1d(a, b, out2)
assert_(c is out2)
def test_out_broadcasts(self):
# For ufuncs and gufuncs (not for reductions), we currently allow
# the output to cause broadcasting of the input arrays,
# both along dimensions with shape 1 and along dimensions which do not
# exist at all in the inputs.
arr = np.arange(3).reshape(1, 3)
out = np.empty((5, 4, 3))
np.add(arr, arr, out=out)
assert (out == np.arange(3) * 2).all()
# The same holds for gufuncs (gh-16484)
umt.inner1d(arr, arr, out=out)
# the result would be just a scalar `5`, but is broadcast fully:
assert (out == 5).all()
@pytest.mark.parametrize(["arr", "out"], [
([2], np.empty(())),
([1, 2], np.empty(1)),
(np.ones((4, 3)), np.empty((4, 1)))],
ids=["(1,)->()", "(2,)->(1,)", "(4, 3)->(4, 1)"])
def test_out_broadcast_errors(self, arr, out):
# Output is (currently) allowed to broadcast inputs, but it cannot be
# smaller than the actual result.
with pytest.raises(ValueError, match="non-broadcastable"):
np.positive(arr, out=out)
with pytest.raises(ValueError, match="non-broadcastable"):
np.add(np.ones(()), arr, out=out)
def test_type_cast(self):
msg = "type cast"
a = np.arange(6, dtype='short').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
msg = "type cast on one argument"
a = np.arange(6).reshape((2, 3))
b = a + 0.1
assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1),
err_msg=msg)
def test_endian(self):
msg = "big endian"
a = np.arange(6, dtype='>i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
msg = "little endian"
a = np.arange(6, dtype='<i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
# Output should always be native-endian
Ba = np.arange(1, dtype='>f8')
La = np.arange(1, dtype='<f8')
assert_equal((Ba+Ba).dtype, np.dtype('f8'))
assert_equal((Ba+La).dtype, np.dtype('f8'))
assert_equal((La+Ba).dtype, np.dtype('f8'))
assert_equal((La+La).dtype, np.dtype('f8'))
assert_equal(np.absolute(La).dtype, np.dtype('f8'))
assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
assert_equal(np.negative(La).dtype, np.dtype('f8'))
assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
def test_incontiguous_array(self):
msg = "incontiguous memory layout of array"
x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
a = x[:, 0, :, 0, :, 0]
b = x[:, 1, :, 1, :, 1]
a[0, 0, 0] = -1
msg2 = "make sure the views reference the original array"
assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
x = np.arange(24).reshape(2, 3, 4)
a = x.T
b = x.T
a[0, 0, 0] = -1
assert_equal(x[0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
def test_output_argument(self):
msg = "output argument"
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(4).reshape((2, 1, 2)) + 1
c = np.zeros((2, 3), dtype='int')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with type cast"
c = np.zeros((2, 3), dtype='int16')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with incontiguous layout"
c = np.zeros((2, 3, 4), dtype='int16')
umt.inner1d(a, b, c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
def test_axes_argument(self):
# inner1d signature: '(i),(i)->()'
inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
# basic tests on inputs (outputs tested below with matrix_multiply).
c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
# default
c = inner1d(a, b, axes=[(-1,), (-1,), ()])
assert_array_equal(c, (a * b).sum(-1))
# integers ok for single axis.
c = inner1d(a, b, axes=[-1, -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# mix fine
c = inner1d(a, b, axes=[(-1,), -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# can omit last axis.
c = inner1d(a, b, axes=[-1, -1])
assert_array_equal(c, (a * b).sum(-1))
# can pass in other types of integer (with __index__ protocol)
c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
assert_array_equal(c, (a * b).sum(-1))
# swap some axes
c = inner1d(a, b, axes=[0, 0])
assert_array_equal(c, (a * b).sum(0))
c = inner1d(a, b, axes=[0, 2])
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
# Check errors for improperly constructed axes arguments.
# axes should be given as a list.
assert_raises(TypeError, inner1d, a, b, axes=-1)
# needs enough elements
assert_raises(ValueError, inner1d, a, b, axes=[-1])
# should pass in indices.
assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
# cannot pass an index unless there is only one dimension
# (output is wrong in this case)
assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1])
# or pass in generally the wrong number of axes
assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)])
assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()])
# axes need to have same length.
assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
# matrix_multiply signature: '(m,n),(n,p)->(m,p)'
mm = umt.matrix_multiply
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(8).reshape((2, 2, 2, 1)) + 1
# Sanity check.
c = mm(a, b)
assert_array_equal(c, np.matmul(a, b))
# Default axes.
c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)])
assert_array_equal(c, np.matmul(a, b))
# Default with explicit axes.
c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)])
assert_array_equal(c, np.matmul(a, b))
# swap some axes.
c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)])
assert_array_equal(c, np.matmul(a.transpose(1, 0, 2),
b.transpose(0, 3, 1, 2)))
# Default with output array.
c = np.empty((2, 2, 3, 1))
d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)])
assert_(c is d)
assert_array_equal(c, np.matmul(a, b))
# Transposed output array
c = np.empty((1, 2, 2, 3))
d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)])
assert_(c is d)
assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2))
# Check errors for improperly constructed axes arguments.
# wrong argument
assert_raises(TypeError, mm, a, b, axis=1)
# axes should be list
assert_raises(TypeError, mm, a, b, axes=1)
assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1)))
# list needs to have right length
assert_raises(ValueError, mm, a, b, axes=[])
assert_raises(ValueError, mm, a, b, axes=[(-2, -1)])
# list should contain tuples for multiple axes
assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1])
assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1])
assert_raises(TypeError,
mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]])
assert_raises(TypeError,
mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]])
assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None])
# tuples should not have duplicated values
assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)])
# arrays should have enough axes.
z = np.zeros((2, 2))
assert_raises(ValueError, mm, z, z[0])
assert_raises(ValueError, mm, z, z, out=z[:, 0])
assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
# Regular ufuncs should not accept axes.
assert_raises(TypeError, np.add, 1., 1., axes=[0])
# should be able to deal with bad unrelated kwargs.
assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True)
def test_axis_argument(self):
# inner1d signature: '(i),(i)->()'
inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, axis=-1)
assert_array_equal(c, (a * b).sum(-1))
out = np.zeros_like(c)
d = inner1d(a, b, axis=-1, out=out)
assert_(d is out)
assert_array_equal(d, c)
c = inner1d(a, b, axis=0)
assert_array_equal(c, (a * b).sum(0))
# Sanity checks on innerwt and cumsum.
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w, axis=0),
np.sum(a * b * w, axis=0))
assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0))
assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1))
out = np.empty_like(a)
b = umt.cumsum(a, out=out, axis=0)
assert_(out is b)
assert_array_equal(b, np.cumsum(a, axis=0))
b = umt.cumsum(a, out=out, axis=1)
assert_(out is b)
assert_array_equal(b, np.cumsum(a, axis=-1))
# Check errors.
# Cannot pass in both axis and axes.
assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0])
# Not an integer.
assert_raises(TypeError, inner1d, a, b, axis=[0])
# more than 1 core dimension.
mm = umt.matrix_multiply
assert_raises(TypeError, mm, a, b, axis=1)
# Output wrong size in axis.
out = np.empty((1, 2, 3), dtype=a.dtype)
assert_raises(ValueError, umt.cumsum, a, out=out, axis=0)
# Regular ufuncs should not accept axis.
assert_raises(TypeError, np.add, 1., 1., axis=0)
def test_keepdims_argument(self):
# inner1d signature: '(i),(i)->()'
inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, keepdims=False)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
out = np.zeros_like(c)
d = inner1d(a, b, keepdims=True, out=out)
assert_(d is out)
assert_array_equal(d, c)
# Now combined with axis and axes.
c = inner1d(a, b, axis=-1, keepdims=False)
assert_array_equal(c, (a * b).sum(-1, keepdims=False))
c = inner1d(a, b, axis=-1, keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
c = inner1d(a, b, axis=0, keepdims=False)
assert_array_equal(c, (a * b).sum(0, keepdims=False))
c = inner1d(a, b, axis=0, keepdims=True)
assert_array_equal(c, (a * b).sum(0, keepdims=True))
c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
c = inner1d(a, b, axes=[0, 0], keepdims=False)
assert_array_equal(c, (a * b).sum(0))
c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
assert_array_equal(c, (a * b).sum(0, keepdims=True))
c = inner1d(a, b, axes=[0, 2], keepdims=False)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
c = inner1d(a, b, axes=[0, 2], keepdims=True)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
keepdims=True))
c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
keepdims=True))
c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
# Hardly useful, but should work.
c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
.sum(1, keepdims=True))
# Check with two core dimensions.
a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
expected = uml.det(a)
c = uml.det(a, keepdims=False)
assert_array_equal(c, expected)
c = uml.det(a, keepdims=True)
assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
expected_s, expected_l = uml.slogdet(a)
cs, cl = uml.slogdet(a, keepdims=False)
assert_array_equal(cs, expected_s)
assert_array_equal(cl, expected_l)
cs, cl = uml.slogdet(a, keepdims=True)
assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
# Sanity check on innerwt.
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
np.sum(a * b * w, axis=-1, keepdims=True))
assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True),
np.sum(a * b * w, axis=0, keepdims=True))
# Check errors.
# Not a boolean
assert_raises(TypeError, inner1d, a, b, keepdims='true')
# More than 1 core dimension, and core output dimensions.
mm = umt.matrix_multiply
assert_raises(TypeError, mm, a, b, keepdims=True)
assert_raises(TypeError, mm, a, b, keepdims=False)
# Regular ufuncs should not accept keepdims.
assert_raises(TypeError, np.add, 1., 1., keepdims=False)
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
a = np.arange(100, 124).reshape((2, 3, 4))
b = np.arange(200, 224).reshape((2, 3, 4))
w = np.arange(300, 324).reshape((2, 3, 4))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_innerwt_empty(self):
"""Test generalized ufunc with zero-sized operands"""
a = np.array([], dtype='f8')
b = np.array([], dtype='f8')
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_cross1d(self):
"""Test with fixed-sized signature."""
a = np.eye(3)
assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
out = np.zeros((3, 3))
result = umt.cross1d(a[0], a, out)
assert_(result is out)
assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
# Wrong output core dimension.
assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
# Wrong output broadcast dimension (see gh-15139).
assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3))
def test_can_ignore_signature(self):
# Comparing the effects of ? in signature:
# matrix_multiply: (m,n),(n,p)->(m,p) # all must be there.
# matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p.
mat = np.arange(12).reshape((2, 3, 2))
single_vec = np.arange(2)
col_vec = single_vec[:, np.newaxis]
col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
# matrix @ single column vector with proper dimension
mm_col_vec = umt.matrix_multiply(mat, col_vec)
# matmul does the same thing
matmul_col_vec = umt.matmul(mat, col_vec)
assert_array_equal(matmul_col_vec, mm_col_vec)
# matrix @ vector without dimension making it a column vector.
# matrix multiply fails -> missing core dim.
assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
# matmul mimicker passes, and returns a vector.
matmul_col = umt.matmul(mat, single_vec)
assert_array_equal(matmul_col, mm_col_vec.squeeze())
# Now with a column array: same as for column vector,
# broadcasting sensibly.
mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
matmul_col_vec = umt.matmul(mat, col_vec_array)
assert_array_equal(matmul_col_vec, mm_col_vec)
# As above, but for row vector
single_vec = np.arange(3)
row_vec = single_vec[np.newaxis, :]
row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
# row vector @ matrix
mm_row_vec = umt.matrix_multiply(row_vec, mat)
matmul_row_vec = umt.matmul(row_vec, mat)
assert_array_equal(matmul_row_vec, mm_row_vec)
# single row vector @ matrix
assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
matmul_row = umt.matmul(single_vec, mat)
assert_array_equal(matmul_row, mm_row_vec.squeeze())
# row vector array @ matrix
mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
matmul_row_vec = umt.matmul(row_vec_array, mat)
assert_array_equal(matmul_row_vec, mm_row_vec)
# Now for vector combinations
# row vector @ column vector
col_vec = row_vec.T
col_vec_array = row_vec_array.swapaxes(-2, -1)
mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
matmul_row_col_vec = umt.matmul(row_vec, col_vec)
assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
# single row vector @ single col vector
assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
matmul_row_col = umt.matmul(single_vec, single_vec)
assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
# row vector array @ column vector array
mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
assert_array_equal(matmul_row_col_array, mm_row_col_array)
# Finally, check that things are *not* squeezed if one gives an
# output.
out = np.zeros_like(mm_row_col_array)
out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
assert_array_equal(out, mm_row_col_array)
out[:] = 0
out = umt.matmul(row_vec_array, col_vec_array, out=out)
assert_array_equal(out, mm_row_col_array)
# And check one cannot put missing dimensions back.
out = np.zeros_like(mm_row_col_vec)
assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
out)
# But fine for matmul, since it is just a broadcast.
out = umt.matmul(single_vec, single_vec, out)
assert_array_equal(out, mm_row_col_vec.squeeze())
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.int64)
self.compare_matrix_multiply_results(np.double)
def test_matrix_multiply_umath_empty(self):
res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0)))
assert_array_equal(res, np.zeros((0, 0)))
res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10)))
assert_array_equal(res, np.zeros((10, 10)))
def compare_matrix_multiply_results(self, tp):
d1 = np.array(np.random.rand(2, 3, 4), dtype=tp)
d2 = np.array(np.random.rand(2, 3, 4), dtype=tp)
msg = "matrix multiply on type %s" % d1.dtype.name
def permute_n(n):
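# Recursively build all permutations of range(n), as lists.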
if n == 1:
return ([0],)
ret = ()
base = permute_n(n-1)
for perm in base:
for i in range(n):
new = perm + [n-1]
new[n-1] = new[i]
new[i] = n-1
ret += (new,)
return ret
def slice_n(n):
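# All n-tuples of slices drawn from slice(None) and slice(0, 1).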
if n == 0:
return ((),)
ret = ()
base = slice_n(n-1)
for sl in base:
ret += (sl+(slice(None),),)
ret += (sl+(slice(0, 1),),)
return ret
def broadcastable(s1, s2):
return s1 == s2 or s1 == 1 or s2 == 1
permute_3 = permute_n(3)
slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
ref = True
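# ref accumulates a check that every sliced operand below is a
# genuine view (base is not None); verified after the loops.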
for p1 in permute_3:
for p2 in permute_3:
for s1 in slice_3:
for s2 in slice_3:
a1 = d1.transpose(p1)[s1]
a2 = d2.transpose(p2)[s2]
ref = ref and a1.base is not None
ref = ref and a2.base is not None
if (a1.shape[-1] == a2.shape[-2] and
broadcastable(a1.shape[0], a2.shape[0])):
assert_array_almost_equal(
umt.matrix_multiply(a1, a2),
np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
a1[..., np.newaxis,:], axis=-1),
err_msg=msg + ' %s %s' % (str(a1.shape),
str(a2.shape)))
assert_equal(ref, True, err_msg="reference check")
def test_euclidean_pdist(self):
a = np.arange(12, dtype=float).reshape(4, 3)
out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
umt.euclidean_pdist(a, out)
b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
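# ~np.tri keeps the strictly upper triangle, i.e. each pairwise
# distance exactly once, matching euclidean_pdist's packed output.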
b = b[~np.tri(a.shape[0], dtype=bool)]
assert_almost_equal(out, b)
# An output array is required to determine p with signature (n,d)->(p)
assert_raises(ValueError, umt.euclidean_pdist, a)
def test_cumsum(self):
a = np.arange(10)
result = umt.cumsum(a)
assert_array_equal(result, a.cumsum())
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
np.array([x or None for x in a], dtype=object))
assert_equal(np.logical_or(a, True),
np.array([x or True for x in a], dtype=object))
assert_equal(np.logical_or(a, 12),
np.array([x or 12 for x in a], dtype=object))
assert_equal(np.logical_or(a, "blah"),
np.array([x or "blah" for x in a], dtype=object))
assert_equal(np.logical_and(a, None),
np.array([x and None for x in a], dtype=object))
assert_equal(np.logical_and(a, True),
np.array([x and True for x in a], dtype=object))
assert_equal(np.logical_and(a, 12),
np.array([x and 12 for x in a], dtype=object))
assert_equal(np.logical_and(a, "blah"),
np.array([x and "blah" for x in a], dtype=object))
assert_equal(np.logical_not(a),
np.array([not x for x in a], dtype=object))
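# The reductions follow Python's short-circuiting semantics:
# (3 or ...) stays 3, while (3 and None) becomes None and stays None.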
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
def test_object_comparison(self):
class HasComparisons:
def __eq__(self, other):
return '=='
arr0d = np.array(HasComparisons())
assert_equal(arr0d == arr0d, True)
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
arr1d = np.array([HasComparisons()])
assert_equal(arr1d == arr1d, np.array([True]))
assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast
assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
assert_equal(np.sum(a), 'abc')
assert_equal(np.max(a), 'c')
assert_equal(np.min(a), 'a')
a = np.array([True, False, True], dtype=object)
assert_equal(np.sum(a), 2)
assert_equal(np.prod(a), 0)
assert_equal(np.any(a), True)
assert_equal(np.all(a), False)
assert_equal(np.max(a), True)
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
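# For object dtype the reduction uses Python '+', so initial=[0]
# concatenates the element lists.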
assert_equal(np.array([[1], [2, 3]], dtype=object)
.sum(initial=[0], where=[False, True]), [0, 2, 3])
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
arr = np.ones(4, dtype=object)
arr[:] = [[1] for i in range(4)]
# Accumulate twice (the bug was also reproduced with tuples):
np.add.accumulate(arr, out=arr)
np.add.accumulate(arr, out=arr)
assert_array_equal(arr,
np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object),
)
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
np.add.accumulate(arr, out=arr, axis=-1)
np.add.accumulate(arr, out=arr, axis=-1)
assert_array_equal(arr[0, :],
np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object),
)
def test_object_array_accumulate_failure(self):
# Typical accumulation on object works as expected:
res = np.add.accumulate(np.array([1, 0, 2], dtype=object))
assert_array_equal(res, np.array([1, 1, 3], dtype=object))
# But errors are propagated from the inner-loop if they occur:
with pytest.raises(TypeError):
np.add.accumulate([1, None, 2])
def test_object_array_reduceat_inplace(self):
# Checks that in-place reduceats work, see also gh-7465
arr = np.empty(4, dtype=object)
arr[:] = [[1] for i in range(4)]
out = np.empty(4, dtype=object)
out[:] = [[1] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr)
np.add.reduceat(arr, np.arange(4), out=arr)
assert_array_equal(arr, out)
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
out = np.ones((2, 4), dtype=object)
out[0, :] = [[2] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
def test_object_array_reduceat_failure(self):
# Reduceat works as expected when no invalid operation occurs (None is
# not involved in an operation here)
res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2])
assert_array_equal(res, np.array([None, 2], dtype=object))
# But errors when None would be involved in an operation:
with pytest.raises(TypeError):
np.add.reduceat([1, None, 2], [0, 2])
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
assert_equal(np.sum(a), 0)
assert_equal(np.prod(a), 1)
assert_equal(np.any(a), False)
assert_equal(np.all(a), True)
assert_raises(ValueError, np.max, a)
assert_raises(ValueError, np.min, a)
def test_axis_out_of_bounds(self):
a = np.array([False, False])
assert_raises(np.AxisError, a.all, axis=1)
a = np.array([False, False])
assert_raises(np.AxisError, a.all, axis=-2)
a = np.array([False, False])
assert_raises(np.AxisError, a.any, axis=1)
a = np.array([False, False])
assert_raises(np.AxisError, a.any, axis=-2)
def test_scalar_reduction(self):
# The functions 'sum', 'prod', etc allow specifying axis=0
# even for scalars
assert_equal(np.sum(3, axis=0), 3)
assert_equal(np.prod(3.5, axis=0), 3.5)
assert_equal(np.any(True, axis=0), True)
assert_equal(np.all(False, axis=0), False)
assert_equal(np.max(3, axis=0), 3)
assert_equal(np.min(2.5, axis=0), 2.5)
# Check scalar behaviour for ufuncs without an identity
assert_equal(np.power.reduce(3), 3)
# Make sure that scalars are coming out from this operation
assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
# check if scalars/0-d arrays get cast
assert_(type(np.any(0, axis=0)) is np.bool_)
# assert that 0-d arrays get wrapped
class MyArray(np.ndarray):
pass
a = np.array(1).view(MyArray)
assert_(type(np.any(a)) is MyArray)
def test_casting_out_param(self):
# Test that it's possible to do casts on output
a = np.ones((200, 100), np.int64)
b = np.ones((200, 100), np.int64)
c = np.ones((200, 100), np.float64)
np.add(a, b, out=c)
assert_equal(c, 2)
a = np.zeros(65536)
b = np.zeros(65536, dtype=np.float32)
np.subtract(a, 0, out=b)
assert_equal(b, 0)
def test_where_param(self):
# Test that the where= ufunc parameter works with regular arrays
a = np.arange(7)
b = np.ones(7)
c = np.zeros(7)
np.add(a, b, out=c, where=(a % 2 == 1))
assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
a = np.arange(4).reshape(2, 2) + 2
np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
assert_equal(a, [[2, 27], [16, 5]])
# Broadcasting the where= parameter
np.subtract(a, 2, out=a, where=[True, False])
assert_equal(a, [[0, 27], [14, 5]])
def test_where_param_buffer_output(self):
# Note: this test was once skipped pending masking features in the
# nditer; it exercises where= combined with casting on the output.
a = np.ones(10, np.int64)
b = np.ones(10, np.int64)
c = 1.5 * np.ones(10, np.float64)
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
def test_where_param_alloc(self):
# With casting and allocated output
a = np.array([1], dtype=np.int64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
# No casting and allocated output
a = np.array([1], dtype=np.float64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
def test_where_with_broadcasting(self):
# See gh-17198
a = np.random.random((5000, 4))
b = np.random.random((5000, 1))
where = a > 0.3
out = np.full_like(a, 0)
np.less(a, b, where=where, out=out)
b_where = np.broadcast_to(b, a.shape)[where]
assert_array_equal((a[where] < b_where), out[where].astype(bool))
assert not out[~where].any() # outside mask, out remains all 0
def check_identityless_reduction(self, a):
# np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
a[1, 0, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
assert_equal(np.minimum.reduce(a, axis=0),
[[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 1, 1, 1], [0, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 1, 1], [0, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 1, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 0, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 0, 1] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 0, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[0, 1, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
@requires_memory(6 * 1024**3)
def test_identityless_reduction_huge_array(self):
# Regression test for gh-20921 (copying identity incorrectly failed)
arr = np.zeros((2, 2**31), 'uint8')
arr[:, 0] = [1, 3]
arr[:, -1] = [4, 1]
res = np.maximum.reduce(arr, axis=0)
del arr
assert res[0] == 3
assert res[-1] == 4
def test_identityless_reduction_corder(self):
a = np.empty((2, 3, 4), order='C')
self.check_identityless_reduction(a)
def test_identityless_reduction_forder(self):
a = np.empty((2, 3, 4), order='F')
self.check_identityless_reduction(a)
def test_identityless_reduction_otherorder(self):
a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig(self):
a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig_unaligned(self):
a = np.empty((3*4*5*8 + 1,), dtype='i1')
a = a[1:].view(dtype='f8')
a.shape = (3, 4, 5)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_initial_reduction(self):
# np.minimum.reduce is an identityless reduction
# For cases like np.maximum(np.abs(...), initial=0)
# More generally, a supremum over non-negative numbers.
assert_equal(np.maximum.reduce([], initial=0), 0)
# For cases like reduction of an empty array over the reals.
assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
# Random tests
assert_equal(np.minimum.reduce([5], initial=4), 4)
assert_equal(np.maximum.reduce([4], initial=5), 5)
assert_equal(np.maximum.reduce([5], initial=4), 5)
assert_equal(np.minimum.reduce([4], initial=5), 4)
# Check initial=None raises ValueError for both types of ufunc reductions
assert_raises(ValueError, np.minimum.reduce, [], initial=None)
assert_raises(ValueError, np.add.reduce, [], initial=None)
# Check that np._NoValue gives default behavior.
assert_equal(np.add.reduce([], initial=np._NoValue), 0)
# Check that initial kwarg behaves as intended for dtype=object
a = np.array([10], dtype=object)
res = np.add.reduce(a, initial=5)
assert_equal(res, 15)
@pytest.mark.parametrize('axis', (0, 1, None))
@pytest.mark.parametrize('where', (np.array([False, True, True]),
np.array([[True], [False], [True]]),
np.array([[True, False, False],
[False, True, False],
[False, True, True]])))
def test_reduction_with_where(self, axis, where):
a = np.arange(9.).reshape(3, 3)
a_copy = a.copy()
a_check = np.zeros_like(a)
np.positive(a, out=a_check, where=where)
res = np.add.reduce(a, axis=axis, where=where)
check = a_check.sum(axis)
assert_equal(res, check)
# Check we do not overwrite elements of a internally.
assert_array_equal(a, a_copy)
@pytest.mark.parametrize(('axis', 'where'),
((0, np.array([True, False, True])),
(1, [True, True, False]),
(None, True)))
@pytest.mark.parametrize('initial', (-np.inf, 5.))
def test_reduction_with_where_and_initial(self, axis, where, initial):
a = np.arange(9.).reshape(3, 3)
a_copy = a.copy()
a_check = np.full(a.shape, -np.inf)
np.positive(a, out=a_check, where=where)
res = np.maximum.reduce(a, axis=axis, where=where, initial=initial)
check = a_check.max(axis, initial=initial)
assert_equal(res, check)
def test_reduction_where_initial_needed(self):
a = np.arange(9.).reshape(3, 3)
m = [False, True, False]
assert_raises(ValueError, np.maximum.reduce, a, where=m)
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
res = np.divide.reduce(a, axis=0)
assert_equal(res, [8.0, 4.0, 8.0])
res = np.divide.reduce(a, axis=1)
assert_equal(res, [2.0, 8.0])
res = np.divide.reduce(a, axis=())
assert_equal(res, a)
assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
def test_reduce_zero_axis(self):
# If we have a n x m array and do a reduction with axis=1, then we are
# doing n reductions, and each reduction takes an m-element array. For
# a reduction operation without an identity, then:
# n > 0, m > 0: fine
# n = 0, m > 0: fine, doing 0 reductions of m-element arrays
# n > 0, m = 0: can't reduce a 0-element array, ValueError
# n = 0, m = 0: can't reduce a 0-element array, ValueError (for
# consistency with the above case)
# This test doesn't actually look at return values; it just checks
# that we get an error in exactly those cases where we expect one,
# and assumes the calculations themselves are done correctly.
def ok(f, *args, **kwargs):
f(*args, **kwargs)
def err(f, *args, **kwargs):
assert_raises(ValueError, f, *args, **kwargs)
def t(expect, func, n, m):
expect(func, np.zeros((n, m)), axis=1)
expect(func, np.zeros((m, n)), axis=0)
expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
expect(func, np.zeros((m // 3, m // 3, m // 3,
n // 2, n // 2)),
axis=(0, 1, 2))
# Check what happens if the inner (resp. outer) dimensions are a
# mix of zero and non-zero:
expect(func, np.zeros((10, m, n)), axis=(0, 1))
expect(func, np.zeros((10, n, m)), axis=(0, 2))
expect(func, np.zeros((m, 10, n)), axis=0)
expect(func, np.zeros((10, m, n)), axis=1)
expect(func, np.zeros((10, n, m)), axis=2)
# np.maximum is just an arbitrary ufunc with no reduction identity
assert_equal(np.maximum.identity, None)
t(ok, np.maximum.reduce, 30, 30)
t(ok, np.maximum.reduce, 0, 30)
t(err, np.maximum.reduce, 30, 0)
t(err, np.maximum.reduce, 0, 0)
err(np.maximum.reduce, [])
np.maximum.reduce(np.zeros((0, 0)), axis=())
# all of the combinations are fine for a reduction that has an
# identity
t(ok, np.add.reduce, 30, 30)
t(ok, np.add.reduce, 0, 30)
t(ok, np.add.reduce, 30, 0)
t(ok, np.add.reduce, 0, 0)
np.add.reduce([])
np.add.reduce(np.zeros((0, 0)), axis=())
# OTOH, accumulate always makes sense for any combination of n and m,
# because it maps an m-element array to an m-element array. These
# tests are simpler because accumulate doesn't accept multiple axes.
for uf in (np.maximum, np.add):
uf.accumulate(np.zeros((30, 0)), axis=0)
uf.accumulate(np.zeros((0, 30)), axis=0)
uf.accumulate(np.zeros((30, 30)), axis=0)
uf.accumulate(np.zeros((0, 0)), axis=0)
def test_safe_casting(self):
# In old versions of numpy, in-place operations used the 'unsafe'
# casting rules. In versions >= 1.10, 'same_kind' is the
# default and an exception is raised instead of a warning
# when 'same_kind' is not satisfied.
a = np.array([1, 2, 3], dtype=int)
# Non-in-place addition is fine
assert_array_equal(assert_no_warnings(np.add, a, 1.1),
[2.1, 3.1, 4.1])
assert_raises(TypeError, np.add, a, 1.1, out=a)
def add_inplace(a, b):
a += b
assert_raises(TypeError, add_inplace, a, 1.1)
# Make sure that explicitly overriding the exception is allowed:
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [2, 3, 4])
def test_ufunc_custom_out(self):
# Test ufunc with built in input types and custom output type
a = np.array([0, 1, 2], dtype='i8')
b = np.array([0, 1, 2], dtype='i8')
c = np.empty(3, dtype=_rational_tests.rational)
# Output must be specified so numpy knows what
# ufunc signature to look for
result = _rational_tests.test_add(a, b, c)
target = np.array([0, 2, 4], dtype=_rational_tests.rational)
assert_equal(result, target)
# The new resolution means that we can (usually) find custom loops
# as long as they match exactly:
result = _rational_tests.test_add(a, b)
assert_equal(result, target)
# This works even more generally, so long as the default common-dtype
# promoter works out:
result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
assert_equal(result, target)
# But, it can be fooled, e.g. (use scalars, which forces legacy
# type resolution to kick in, which then fails):
with assert_raises(TypeError):
_rational_tests.test_add(a, np.uint16(2))
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
b = np.arange(9, dtype='l').reshape(3, 3)
opflag_tests.inplace_add(a[:-1, :-1], b)
assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
[14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
a = np.array(0)
opflag_tests.inplace_add(a, 3)
assert_equal(a, 3)
opflag_tests.inplace_add(a, [3, 4])
assert_equal(a, 10)
def test_struct_ufunc(self):
import numpy.core._struct_ufunc_tests as struct_ufunc
a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
result = struct_ufunc.add_triplet(a, b)
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
assert_raises(RuntimeError, struct_ufunc.register_fail)
def test_custom_ufunc(self):
a = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tests.rational)
b = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tests.rational)
result = _rational_tests.test_add_rationals(a, b)
expected = np.array(
[_rational_tests.rational(1),
_rational_tests.rational(2, 3),
_rational_tests.rational(1, 2)],
dtype=_rational_tests.rational)
assert_equal(result, expected)
def test_custom_ufunc_forced_sig(self):
# gh-9351 - looking for a non-first userloop would previously hang
with assert_raises(TypeError):
np.multiply(_rational_tests.rational(1), 1,
signature=(_rational_tests.rational, int, None))
def test_custom_array_like(self):
class MyThing:
__array_priority__ = 1000
rmul_count = 0
getitem_count = 0
def __init__(self, shape):
self.shape = shape
def __len__(self):
return self.shape[0]
def __getitem__(self, i):
MyThing.getitem_count += 1
if not isinstance(i, tuple):
i = (i,)
if len(i) > self.ndim:
raise IndexError("boo")
return MyThing(self.shape[len(i):])
def __rmul__(self, other):
MyThing.rmul_count += 1
return self
np.float64(5)*MyThing((3, 3))
assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
def test_inplace_fancy_indexing(self):
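# Unlike buffered in-place operations (a[idx] += 1), ufunc.at is
# unbuffered, so the repeated index 2 accumulates twice.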
a = np.arange(10)
np.add.at(a, [2, 5, 2], 1)
assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
a = np.arange(10)
b = np.array([100, 100, 100])
np.add.at(a, [2, 5, 2], b)
assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, (slice(None), [1, 2, 1]), b)
assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
assert_equal(a,
[[[0, 401, 202],
[3, 404, 205],
[6, 407, 208]],
[[9, 410, 211],
[12, 413, 214],
[15, 416, 217]],
[[18, 419, 220],
[21, 422, 223],
[24, 425, 226]]])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, ([1, 2, 1], slice(None)), b)
assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[203, 404, 605],
[106, 207, 308]],
[[9, 10, 11],
[212, 413, 614],
[115, 216, 317]],
[[18, 19, 20],
[221, 422, 623],
[124, 225, 326]]])
a = np.arange(9).reshape(3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (0, [1, 2, 1]), b)
assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[3, 4, 5],
[6, 7, 8]],
[[209, 410, 611],
[12, 13, 14],
[15, 16, 17]],
[[118, 219, 320],
[21, 22, 23],
[24, 25, 26]]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), slice(None)), b)
assert_equal(a,
[[[100, 201, 302],
[103, 204, 305],
[106, 207, 308]],
[[109, 210, 311],
[112, 213, 314],
[115, 216, 317]],
[[118, 219, 320],
[121, 222, 323],
[124, 225, 326]]])
a = np.arange(10)
np.negative.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9])
# Test 0-dim array
a = np.array(0)
np.add.at(a, (), 1)
assert_equal(a, 1)
assert_raises(IndexError, np.add.at, a, 0, 1)
assert_raises(IndexError, np.add.at, a, [], 1)
# Test mixed dtypes
a = np.arange(10)
np.power.at(a, [1, 2, 3, 2], 3.5)
assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
# Test boolean indexing and boolean ufuncs
a = np.arange(10)
index = a % 2 == 0
np.equal.at(a, index, [0, 2, 4, 6, 8])
assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
# Test unary operator
a = np.arange(10, dtype='u4')
np.invert.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
# Test empty subspace
orig = np.arange(4)
a = orig[:, None][:, 0:0]
np.add.at(a, [0, 1], 3)
assert_array_equal(orig, np.arange(4))
# Test with swapped byte order
index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
np.add.at(values, index, 3)
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
values = np.array(['a', 1], dtype=object)
assert_raises(TypeError, np.add.at, values, [0, 1], 1)
assert_array_equal(values, np.array(['a', 1], dtype=object))
# Test multiple output ufuncs raise error, gh-5665
assert_raises(ValueError, np.modf.at, np.arange(10), [1])
# Test maximum
a = np.array([1, 2, 3])
np.maximum.at(a, [0], 0)
assert_equal(np.array([1, 2, 3]), a)
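    # Added illustrative sketch (not part of the original suite): unlike a
    # buffered fancy-index assignment, `ufunc.at` is unbuffered, so repeated
    # indices accumulate instead of the last write winning.
    def test_at_repeated_indices_sketch(self):
        a = np.zeros(3, dtype=int)
        a[[0, 0, 0]] += 1  # buffered: equivalent to a single write of 1
        assert_equal(a, [1, 0, 0])
        b = np.zeros(3, dtype=int)
        np.add.at(b, [0, 0, 0], 1)  # unbuffered: all three increments land
        assert_equal(b, [3, 0, 0])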
def test_at_not_none_signature(self):
        # Test that ufuncs with a non-trivial signature raise a TypeError
a = np.ones((2, 2, 2))
b = np.ones((1, 2, 2))
assert_raises(TypeError, np.matmul.at, a, [0], b)
a = np.array([[[1, 2], [3, 4]]])
assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0])
def test_reduce_arguments(self):
f = np.add.reduce
d = np.ones((5,2), dtype=int)
o = np.ones((2,), dtype=d.dtype)
r = o * 5
assert_equal(f(d), r)
# a, axis=0, dtype=None, out=None, keepdims=False
assert_equal(f(d, axis=0), r)
assert_equal(f(d, 0), r)
assert_equal(f(d, 0, dtype=None), r)
assert_equal(f(d, 0, dtype='i'), r)
assert_equal(f(d, 0, 'i'), r)
assert_equal(f(d, 0, None), r)
assert_equal(f(d, 0, None, out=None), r)
assert_equal(f(d, 0, None, out=o), r)
assert_equal(f(d, 0, None, o), r)
assert_equal(f(d, 0, None, None), r)
assert_equal(f(d, 0, None, None, keepdims=False), r)
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
assert_equal(f(d, 0, None, None, False, 0), r)
assert_equal(f(d, 0, None, None, False, initial=0), r)
assert_equal(f(d, 0, None, None, False, 0, True), r)
assert_equal(f(d, 0, None, None, False, 0, where=True), r)
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0,
where=True), r)
# too little
assert_raises(TypeError, f)
# too much
assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
assert_raises(TypeError, f, d, axis="invalid", dtype=None,
keepdims=True)
# invalid dtype
assert_raises(TypeError, f, d, 0, "invalid")
assert_raises(TypeError, f, d, dtype="invalid")
assert_raises(TypeError, f, d, dtype="invalid", out=None)
# invalid out
assert_raises(TypeError, f, d, 0, None, "invalid")
assert_raises(TypeError, f, d, out="invalid")
assert_raises(TypeError, f, d, out="invalid", dtype=None)
# keepdims boolean, no invalid value
# assert_raises(TypeError, f, d, 0, None, None, "invalid")
# assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
# invalid mix
assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
out=None)
# invalid keyword
assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
assert_raises(TypeError, f, d, invalid=0)
assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
out=None)
assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
out=None, invalid=0)
assert_raises(TypeError, f, d, axis=0, dtype=None,
out=None, invalid=0)
def test_structured_equal(self):
# https://github.com/numpy/numpy/issues/4855
class MyA(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*(input.view(np.ndarray)
for input in inputs), **kwargs)
a = np.arange(12.).reshape(4,3)
ra = a.view(dtype=('f8,f8,f8')).squeeze()
mra = ra.view(MyA)
target = np.array([ True, False, False, False], dtype=bool)
assert_equal(np.all(target == (mra == ra[0])), True)
def test_scalar_equal(self):
        # Scalar comparisons should always work, without deprecation warnings,
        # even when the ufunc fails.
a = np.array(0.)
b = np.array('a')
assert_(a != b)
assert_(b != a)
assert_(not (a == b))
assert_(not (b == a))
def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
binary_funcs = [
np.power, np.add, np.subtract, np.multiply, np.divide,
np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.maximum, np.minimum, np.mod,
np.greater, np.greater_equal, np.less, np.less_equal,
np.equal, np.not_equal]
a = np.array('1')
b = 1
c = np.array([1., 2.])
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
assert_raises(TypeError, f, c, a)
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or]) # logical_xor object loop is bad
@pytest.mark.parametrize("signature",
[(None, None, object), (object, None, None),
(None, object, None)])
def test_logical_ufuncs_object_signatures(self, ufunc, signature):
a = np.array([True, None, False], dtype=object)
res = ufunc(a, a, signature=signature)
assert res.dtype == object
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
@pytest.mark.parametrize("signature",
[(bool, None, object), (object, None, bool),
(None, object, bool)])
def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature):
# Most mixed signatures fail (except those with bool out, e.g. `OO->?`)
a = np.array([True, None, False])
with pytest.raises(TypeError):
ufunc(a, a, signature=signature)
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_support_anything(self, ufunc):
# The logical ufuncs support even input that can't be promoted:
a = np.array(b'1', dtype="V3")
c = np.array([1., 2.])
assert_array_equal(ufunc(a, c), ufunc([True, True], True))
assert ufunc.reduce(a) == True
# check that the output has no effect:
out = np.zeros(2, dtype=np.int32)
expected = ufunc([True, True], True).astype(out.dtype)
assert_array_equal(ufunc(a, c, out=out), expected)
out = np.zeros((), dtype=np.int32)
assert ufunc.reduce(a, out=out) == True
        # Last check: test reduction when `out` and `a` match (the complexity
        # here is that the "i,i->?" loop may seem right, but should not match).
a = np.array([3], dtype="i")
out = np.zeros((), dtype=a.dtype)
assert ufunc.reduce(a, out=out) == 1
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_reject_string(self, ufunc):
"""
Logical ufuncs are normally well defined by working with the boolean
equivalent, i.e. casting all inputs to bools should work.
However, casting strings to bools is *currently* weird, because it
actually uses `bool(int(str))`. Thus we explicitly reject strings.
This test should succeed (and can probably just be removed) as soon as
string to bool casts are well defined in NumPy.
"""
with pytest.raises(TypeError, match="contain a loop with signature"):
ufunc(["1"], ["3"])
with pytest.raises(TypeError, match="contain a loop with signature"):
ufunc.reduce(["1", "2", "0"])
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_out_cast_check(self, ufunc):
a = np.array('1')
c = np.array([1., 2.])
out = a.copy()
with pytest.raises(TypeError):
# It would be safe, but not equiv casting:
ufunc(a, c, out=out, casting="equiv")
def test_reducelike_byteorder_resolution(self):
# See gh-20699, byte-order changes need some extra care in the type
# resolution to make the following succeed:
arr_be = np.arange(10, dtype=">i8")
arr_le = np.arange(10, dtype="<i8")
assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
assert_array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le))
assert_array_equal(
np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1]))
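    # Added illustration: ">i8" and "<i8" describe the same values with
    # different byte layouts, which is why the reductions above must agree.
    def test_byteorder_values_equal_sketch(self):
        be = np.arange(3, dtype=">i8")
        le = be.astype("<i8")
        assert be.dtype != le.dtype  # the layouts differ
        assert_array_equal(be, le)   # the values do not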
def test_reducelike_out_promotes(self):
# Check that the out argument to reductions is considered for
# promotion. See also gh-20455.
# Note that these paths could prefer `initial=` in the future and
# do not up-cast to the default integer for add and prod
arr = np.ones(1000, dtype=np.uint8)
out = np.zeros((), dtype=np.uint16)
assert np.add.reduce(arr, out=out) == 1000
arr[:10] = 2
assert np.multiply.reduce(arr, out=out) == 2**10
# For legacy dtypes, the signature currently has to be forced if `out=`
# is passed. The two paths below should differ, without `dtype=` the
# expected result should be: `np.prod(arr.astype("f8")).astype("f4")`!
arr = np.full(5, 2**25-1, dtype=np.int64)
        # float32 and int64 promote to float64, so without `dtype=` the
        # result is computed in float64 and then cast into the float32 `res`:
        res = np.zeros((), dtype=np.float32)
        np.multiply.reduce(arr, out=res)
# If `dtype=` is passed, the calculation is forced to float32:
single_res = np.zeros((), dtype=np.float32)
np.multiply.reduce(arr, out=single_res, dtype=np.float32)
assert single_res != res
def test_reducelike_output_needs_identical_cast(self):
        # Checks that the case where we only need a simple byte-swap works;
        # mainly tests that this is not rejected directly.
# (interesting because we require descriptor identity in reducelikes).
arr = np.ones(20, dtype="f8")
out = np.empty((), dtype=arr.dtype.newbyteorder())
expected = np.add.reduce(arr)
np.add.reduce(arr, out=out)
assert_array_equal(expected, out)
# Check reduceat:
out = np.empty(2, dtype=arr.dtype.newbyteorder())
expected = np.add.reduceat(arr, [0, 1])
np.add.reduceat(arr, [0, 1], out=out)
assert_array_equal(expected, out)
# And accumulate:
out = np.empty(arr.shape, dtype=arr.dtype.newbyteorder())
expected = np.add.accumulate(arr)
np.add.accumulate(arr, out=out)
assert_array_equal(expected, out)
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
#
# gh-8036
x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8)
x = x[4:6,1:11:6,1:5].transpose(1, 2, 0)
y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4)
y = y_base[::2,:]
y_base_copy = y_base.copy()
r0 = np.add.reduce(x, out=y.copy(), axis=2)
r1 = np.add.reduce(x, out=y, axis=2)
# The results should match, and y_base shouldn't get clobbered
assert_equal(r0, r1)
assert_equal(y_base[1,:], y_base_copy[1,:])
assert_equal(y_base[3,:], y_base_copy[3,:])
@pytest.mark.parametrize("with_cast", [True, False])
def test_reduceat_and_accumulate_out_shape_mismatch(self, with_cast):
# Should raise an error mentioning "shape" or "size"
arr = np.arange(5)
out = np.arange(3) # definitely wrong shape
if with_cast:
# If a cast is necessary on the output, we can be sure to use
# the generic NpyIter (non-fast) path.
out = out.astype(np.float64)
with pytest.raises(ValueError, match="(shape|size)"):
np.add.reduceat(arr, [0, 3], out=out)
with pytest.raises(ValueError, match="(shape|size)"):
np.add.accumulate(arr, out=out)
@pytest.mark.parametrize('out_shape',
[(), (1,), (3,), (1, 1), (1, 3), (4, 3)])
@pytest.mark.parametrize('keepdims', [True, False])
@pytest.mark.parametrize('f_reduce', [np.add.reduce, np.minimum.reduce])
def test_reduce_wrong_dimension_output(self, f_reduce, keepdims, out_shape):
# Test that we're not incorrectly broadcasting dimensions.
# See gh-15144 (failed for np.add.reduce previously).
a = np.arange(12.).reshape(4, 3)
out = np.empty(out_shape, a.dtype)
correct_out = f_reduce(a, axis=0, keepdims=keepdims)
if out_shape != correct_out.shape:
with assert_raises(ValueError):
f_reduce(a, axis=0, out=out, keepdims=keepdims)
else:
check = f_reduce(a, axis=0, out=out, keepdims=keepdims)
assert_(check is out)
assert_array_equal(check, correct_out)
def test_reduce_output_does_not_broadcast_input(self):
# Test that the output shape cannot broadcast an input dimension
# (it never can add dimensions, but it might expand an existing one)
a = np.ones((1, 10))
out_correct = (np.empty((1, 1)))
out_incorrect = np.empty((3, 1))
np.add.reduce(a, axis=-1, out=out_correct, keepdims=True)
np.add.reduce(a, axis=-1, out=out_correct[:, 0], keepdims=False)
with assert_raises(ValueError):
np.add.reduce(a, axis=-1, out=out_incorrect, keepdims=True)
with assert_raises(ValueError):
np.add.reduce(a, axis=-1, out=out_incorrect[:, 0], keepdims=False)
def test_reduce_output_subclass_ok(self):
class MyArr(np.ndarray):
pass
out = np.empty(())
np.add.reduce(np.ones(5), out=out) # no subclass, all fine
out = out.view(MyArr)
assert np.add.reduce(np.ones(5), out=out) is out
assert type(np.add.reduce(out)) is MyArr
def test_no_doc_string(self):
# gh-9337
assert_('\n' not in umt.inner1d_no_doc.__doc__)
def test_invalid_args(self):
# gh-7961
exc = pytest.raises(TypeError, np.sqrt, None)
# minimally check the exception text
assert exc.match('loop of ufunc does not support')
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_not_finite(self, nat):
try:
assert not np.isfinite(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_nan(self, nat):
try:
assert np.isnan(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_not_inf(self, nat):
try:
assert not np.isinf(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
if isinstance(getattr(np, x), np.ufunc)])
def test_ufunc_types(ufunc):
'''
    Check that all ufuncs return the correct type. Avoid
    object and boolean types since many operations are not defined
    for them.
Choose the shape so even dot and matmul will succeed
'''
for typ in ufunc.types:
# types is a list of strings like ii->i
if 'O' in typ or '?' in typ:
continue
inp, out = typ.split('->')
args = [np.ones((3, 3), t) for t in inp]
with warnings.catch_warnings(record=True):
warnings.filterwarnings("always")
res = ufunc(*args)
if isinstance(res, tuple):
outs = tuple(out)
assert len(res) == len(outs)
for r, t in zip(res, outs):
assert r.dtype == np.dtype(t)
else:
assert res.dtype == np.dtype(out)
@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
if isinstance(getattr(np, x), np.ufunc)])
def test_ufunc_noncontiguous(ufunc):
'''
Check that contiguous and non-contiguous calls to ufuncs
    have the same results for the values 1 through 6
'''
for typ in ufunc.types:
# types is a list of strings like ii->i
if any(set('O?mM') & set(typ)):
# bool, object, datetime are too irregular for this simple test
continue
inp, out = typ.split('->')
args_c = [np.empty(6, t) for t in inp]
args_n = [np.empty(18, t)[::3] for t in inp]
for a in args_c:
a.flat = range(1,7)
for a in args_n:
a.flat = range(1,7)
with warnings.catch_warnings(record=True):
warnings.filterwarnings("always")
res_c = ufunc(*args_c)
res_n = ufunc(*args_n)
if len(out) == 1:
res_c = (res_c,)
res_n = (res_n,)
for c_ar, n_ar in zip(res_c, res_n):
dt = c_ar.dtype
if np.issubdtype(dt, np.floating):
            # for floating point results allow a small fuzz in comparisons
# since different algorithms (libm vs. intrinsics) can be used
# for different input strides
res_eps = np.finfo(dt).eps
tol = 2*res_eps
assert_allclose(res_c, res_n, atol=tol, rtol=tol)
else:
assert_equal(c_ar, n_ar)
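# Added sketch of the tolerance used above (illustrative only):
# `np.finfo(dt).eps` is the spacing of the dtype at 1.0, so `2*eps` admits
# roughly one ulp of divergence between the strided and contiguous loops.
def test_eps_spacing_sketch():
    eps = np.finfo(np.float64).eps
    assert eps == 2.0**-52  # float64 has a 52-bit fractional part
    assert 1.0 + eps != 1.0
    # the halfway case rounds back to 1.0 under round-to-nearest-even:
    assert 1.0 + eps / 2 == 1.0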
@pytest.mark.parametrize('ufunc', [np.sign, np.equal])
def test_ufunc_warn_with_nan(ufunc):
# issue gh-15127
# test that calling certain ufuncs with a non-standard `nan` value does not
# emit a warning
# `b` holds a 64 bit signaling nan: the most significant bit of the
# significand is zero.
b = np.array([0x7ff0000000000001], 'i8').view('f8')
assert np.isnan(b)
if ufunc.nin == 1:
ufunc(b)
elif ufunc.nin == 2:
ufunc(b, b.copy())
else:
raise ValueError('ufunc with more than 2 inputs')
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_ufunc_casterrors():
# Tests that casting errors are correctly reported and buffers are
# cleared.
# The following array can be added to itself as an object array, but
# the result cannot be cast to an integer output:
value = 123 # relies on python cache (leak-check will still find it)
arr = np.array([value] * int(np.BUFSIZE * 1.5) +
["string"] +
[value] * int(1.5 * np.BUFSIZE), dtype=object)
out = np.ones(len(arr), dtype=np.intp)
count = sys.getrefcount(value)
with pytest.raises(ValueError):
# Output casting failure:
np.add(arr, arr, out=out, casting="unsafe")
assert count == sys.getrefcount(value)
# output is unchanged after the error, this shows that the iteration
# was aborted (this is not necessarily defined behaviour)
assert out[-1] == 1
with pytest.raises(ValueError):
# Input casting failure:
np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe")
assert count == sys.getrefcount(value)
# output is unchanged after the error, this shows that the iteration
# was aborted (this is not necessarily defined behaviour)
assert out[-1] == 1
def test_trivial_loop_invalid_cast():
# This tests the fast-path "invalid cast", see gh-19904.
with pytest.raises(TypeError,
match="cast ufunc 'add' input 0"):
# the void dtype definitely cannot cast to double:
np.add(np.array(1, "i,i"), 3, signature="dd->d")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.parametrize("offset",
[0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)])
def test_reduce_casterrors(offset):
# Test reporting of casting errors in reductions, we test various
# offsets to where the casting error will occur, since these may occur
# at different places during the reduction procedure. For example
# the first item may be special.
value = 123 # relies on python cache (leak-check will still find it)
arr = np.array([value] * offset +
["string"] +
[value] * int(1.5 * np.BUFSIZE), dtype=object)
out = np.array(-1, dtype=np.intp)
count = sys.getrefcount(value)
with pytest.raises(ValueError, match="invalid literal"):
# This is an unsafe cast, but we currently always allow that.
# Note that the double loop is picked, but the cast fails.
np.add.reduce(arr, dtype=np.intp, out=out)
assert count == sys.getrefcount(value)
# If an error occurred during casting, the operation is done at most until
# the error occurs (the result of which would be `value * offset`) and -1
# if the error happened immediately.
# This does not define behaviour, the output is invalid and thus undefined
assert out[()] < value * offset
@pytest.mark.parametrize("method",
[np.add.accumulate, np.add.reduce,
pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"),
pytest.param(lambda x: np.log.at(x, [2]), id="at")])
def test_ufunc_methods_floaterrors(method):
    # adding inf and -inf (or taking log(-inf)) creates an invalid float and warns
arr = np.array([np.inf, 0, -np.inf])
with np.errstate(all="warn"):
with pytest.warns(RuntimeWarning, match="invalid value"):
method(arr)
arr = np.array([np.inf, 0, -np.inf])
with np.errstate(all="raise"):
with pytest.raises(FloatingPointError):
method(arr)
def _check_neg_zero(value):
if value != 0.0:
return False
if not np.signbit(value.real):
return False
if value.dtype.kind == "c":
return np.signbit(value.imag)
return True
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_addition_negative_zero(dtype):
dtype = np.dtype(dtype)
if dtype.kind == "c":
neg_zero = dtype.type(complex(-0.0, -0.0))
else:
neg_zero = dtype.type(-0.0)
arr = np.array(neg_zero)
arr2 = np.array(neg_zero)
assert _check_neg_zero(arr + arr2)
# In-place ops may end up on a different path (reduce path) see gh-21211
arr += arr2
assert _check_neg_zero(arr)
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("use_initial", [True, False])
def test_addition_reduce_negative_zero(dtype, use_initial):
dtype = np.dtype(dtype)
if dtype.kind == "c":
neg_zero = dtype.type(complex(-0.0, -0.0))
else:
neg_zero = dtype.type(-0.0)
kwargs = {}
if use_initial:
kwargs["initial"] = neg_zero
else:
pytest.xfail("-0. propagation in sum currently requires initial")
# Test various length, in case SIMD paths or chunking play a role.
# 150 extends beyond the pairwise blocksize; probably not important.
for i in range(0, 150):
arr = np.array([neg_zero] * i, dtype=dtype)
res = np.sum(arr, **kwargs)
if i > 0 or use_initial:
assert _check_neg_zero(res)
else:
# `sum([])` should probably be 0.0 and not -0.0 like `sum([-0.0])`
assert not np.signbit(res.real)
assert not np.signbit(res.imag)
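# Added worked example: under IEEE-754 round-to-nearest, a zero sum is
# negative only when *all* contributions are -0.0, which is why the
# reduction above needs `initial=-0.0` to propagate the sign.
def test_negative_zero_addition_sketch():
    assert _check_neg_zero(np.float64(-0.0) + np.float64(-0.0))
    assert not np.signbit(np.float64(-0.0) + np.float64(0.0))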
| 108,584 | Python | 40.747405 | 91 | 0.536202 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_array_interface.py | import sys
import pytest
import numpy as np
from numpy.testing import extbuild
@pytest.fixture
def get_module(tmp_path):
""" Some codes to generate data and manage temporary buffers use when
sharing with numpy via the array interface protocol.
"""
if not sys.platform.startswith('linux'):
pytest.skip('link fails on cygwin')
prologue = '''
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
#include <stdio.h>
#include <math.h>
NPY_NO_EXPORT
void delete_array_struct(PyObject *cap) {
/* get the array interface structure */
PyArrayInterface *inter = (PyArrayInterface*)
PyCapsule_GetPointer(cap, NULL);
/* get the buffer by which data was shared */
double *ptr = (double*)PyCapsule_GetContext(cap);
/* for the purposes of the regression test set the elements
to nan */
for (npy_intp i = 0; i < inter->shape[0]; ++i)
ptr[i] = nan("");
/* free the shared buffer */
free(ptr);
/* free the array interface structure */
free(inter->shape);
free(inter);
fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
" ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
}
'''
functions = [
("new_array_struct", "METH_VARARGS", """
long long n_elem = 0;
double value = 0.0;
if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
Py_RETURN_NONE;
}
/* allocate and initialize the data to share with numpy */
long long n_bytes = n_elem*sizeof(double);
double *data = (double*)malloc(n_bytes);
if (!data) {
PyErr_Format(PyExc_MemoryError,
"Failed to malloc %lld bytes", n_bytes);
Py_RETURN_NONE;
}
for (long long i = 0; i < n_elem; ++i) {
data[i] = value;
}
/* calculate the shape and stride */
int nd = 1;
npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
npy_intp *shape = ss;
npy_intp *stride = ss + nd;
shape[0] = n_elem;
stride[0] = sizeof(double);
/* construct the array interface */
PyArrayInterface *inter = (PyArrayInterface*)
malloc(sizeof(PyArrayInterface));
memset(inter, 0, sizeof(PyArrayInterface));
inter->two = 2;
inter->nd = nd;
inter->typekind = 'f';
inter->itemsize = sizeof(double);
inter->shape = shape;
inter->strides = stride;
inter->data = data;
inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
/* package into a capsule */
PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
/* save the pointer to the data */
PyCapsule_SetContext(cap, data);
fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
" ptr = %ld\\n", (long)cap, (long)inter, (long)data);
return cap;
""")
]
more_init = "import_array();"
try:
import array_interface_testing
return array_interface_testing
except ImportError:
pass
# if it does not exist, build and load it
return extbuild.build_and_import_extension('array_interface_testing',
functions,
prologue=prologue,
include_dirs=[np.get_include()],
build_dir=tmp_path,
more_init=more_init)
@pytest.mark.slow
def test_cstruct(get_module):
class data_source:
"""
This class is for testing the timing of the PyCapsule destructor
        invoked when numpy releases its reference to the shared data as part
        of the numpy array interface protocol. If the PyCapsule destructor is
        called early, the shared data is freed and invalid memory accesses will
occur.
"""
def __init__(self, size, value):
self.size = size
self.value = value
@property
def __array_struct__(self):
return get_module.new_array_struct(self.size, self.value)
# write to the same stream as the C code
stderr = sys.__stderr__
# used to validate the shared data.
expected_value = -3.1415
multiplier = -10000.0
# create some data to share with numpy via the array interface
# assign the data an expected value.
stderr.write(' ---- create an object to share data ---- \n')
buf = data_source(256, expected_value)
stderr.write(' ---- OK!\n\n')
# share the data
stderr.write(' ---- share data via the array interface protocol ---- \n')
arr = np.array(buf, copy=False)
stderr.write('arr.__array_interface___ = %s\n' % (
str(arr.__array_interface__)))
stderr.write('arr.base = %s\n' % (str(arr.base)))
stderr.write(' ---- OK!\n\n')
# release the source of the shared data. this will not release the data
    # that was shared with numpy; that is done in the PyCapsule destructor.
stderr.write(' ---- destroy the object that shared data ---- \n')
buf = None
stderr.write(' ---- OK!\n\n')
# check that we got the expected data. If the PyCapsule destructor we
# defined was prematurely called then this test will fail because our
    # destructor sets the elements of the array to NaN before freeing the
# buffer. Reading the values here may also cause a SEGV
assert np.allclose(arr, expected_value)
# read the data. If the PyCapsule destructor we defined was prematurely
# called then reading the values here may cause a SEGV and will be reported
# as invalid reads by valgrind
stderr.write(' ---- read shared data ---- \n')
stderr.write('arr = %s\n' % (str(arr)))
stderr.write(' ---- OK!\n\n')
# write to the shared buffer. If the shared data was prematurely deleted
    # this may cause a SEGV and valgrind will report invalid writes
stderr.write(' ---- modify shared data ---- \n')
arr *= multiplier
expected_value *= multiplier
stderr.write('arr.__array_interface___ = %s\n' % (
str(arr.__array_interface__)))
stderr.write('arr.base = %s\n' % (str(arr.base)))
stderr.write(' ---- OK!\n\n')
# read the data. If the shared data was prematurely deleted this
    # may cause a SEGV and valgrind will report invalid reads
stderr.write(' ---- read modified shared data ---- \n')
stderr.write('arr = %s\n' % (str(arr)))
stderr.write(' ---- OK!\n\n')
# check that we got the expected data. If the PyCapsule destructor we
# defined was prematurely called then this test will fail because our
    # destructor sets the elements of the array to NaN before freeing the
# buffer. Reading the values here may also cause a SEGV
assert np.allclose(arr, expected_value)
# free the shared data, the PyCapsule destructor should run here
stderr.write(' ---- free shared data ---- \n')
arr = None
stderr.write(' ---- OK!\n\n')
| 7,596 | Python | 34.009216 | 79 | 0.562138 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test__exceptions.py | """
Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
"""
import pickle
import pytest
import numpy as np
_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
_UFuncNoLoopError = np.core._exceptions._UFuncNoLoopError
class TestArrayMemoryError:
def test_pickling(self):
""" Test that _ArrayMemoryError can be pickled """
error = _ArrayMemoryError((1023,), np.dtype(np.uint8))
res = pickle.loads(pickle.dumps(error))
assert res._total_size == error._total_size
def test_str(self):
e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
str(e) # not crashing is enough
# testing these properties is easier than testing the full string repr
def test__size_to_string(self):
""" Test e._size_to_string """
f = _ArrayMemoryError._size_to_string
Ki = 1024
assert f(0) == '0 bytes'
assert f(1) == '1 bytes'
assert f(1023) == '1023 bytes'
assert f(Ki) == '1.00 KiB'
assert f(Ki+1) == '1.00 KiB'
assert f(10*Ki) == '10.0 KiB'
assert f(int(999.4*Ki)) == '999. KiB'
assert f(int(1023.4*Ki)) == '1023. KiB'
assert f(int(1023.5*Ki)) == '1.00 MiB'
assert f(Ki*Ki) == '1.00 MiB'
        # 1023.9999 MiB should round to 1 GiB
assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
# larger than sys.maxsize, adding larger prefixes isn't going to help
# anyway.
assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
def test__total_size(self):
""" Test e._total_size """
e = _ArrayMemoryError((1,), np.dtype(np.uint8))
assert e._total_size == 1
e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
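        # 2*4 elements, each a 16-item uint64 subarray: 2*4*16*8 bytes == 1024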
assert e._total_size == 1024
class TestUFuncNoLoopError:
def test_pickling(self):
""" Test that _UFuncNoLoopError can be pickled """
assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes)
@pytest.mark.parametrize("args", [
(2, 1, None),
(2, 1, "test_prefix"),
("test message",),
])
class TestAxisError:
def test_attr(self, args):
"""Validate attribute types."""
exc = np.AxisError(*args)
if len(args) == 1:
assert exc.axis is None
assert exc.ndim is None
else:
axis, ndim, *_ = args
assert exc.axis == axis
assert exc.ndim == ndim
def test_pickling(self, args):
"""Test that `AxisError` can be pickled."""
exc = np.AxisError(*args)
exc2 = pickle.loads(pickle.dumps(exc))
assert type(exc) is type(exc2)
for name in ("axis", "ndim", "args"):
attr1 = getattr(exc, name)
attr2 = getattr(exc2, name)
assert attr1 == attr2, name
| 2,846 | Python | 30.988764 | 79 | 0.575193 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_item_selection.py | import sys
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_array_equal, HAS_REFCOUNT
)
class TestTake:
def test_simple(self):
a = [[1, 2], [3, 4]]
a_str = [[b'1', b'2'], [b'3', b'4']]
modes = ['raise', 'wrap', 'clip']
indices = [-1, 4]
index_arrays = [np.empty(0, dtype=np.intp),
np.empty(tuple(), dtype=np.intp),
np.empty((1, 1), dtype=np.intp)]
real_indices = {'raise': {-1: 1, 4: IndexError},
'wrap': {-1: 1, 4: 0},
'clip': {-1: 0, 4: 1}}
        # Currently all types but object use the same generated functions,
        # so it should not be necessary to test them all. However, in
        # addition to object, also test a non-refcounted struct whose size
        # hits the default (non-specialized) path.
types = int, object, np.dtype([('', 'i2', 3)])
for t in types:
            # ta works, even if the array may be odd when the buffer interface is used
ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
tresult = list(ta.T.copy())
for index_array in index_arrays:
if index_array.size != 0:
tresult[0].shape = (2,) + index_array.shape
tresult[1].shape = (2,) + index_array.shape
for mode in modes:
for index in indices:
real_index = real_indices[mode][index]
if real_index is IndexError and index_array.size != 0:
index_array.put(0, index)
assert_raises(IndexError, ta.take, index_array,
mode=mode, axis=1)
elif index_array.size != 0:
index_array.put(0, index)
res = ta.take(index_array, mode=mode, axis=1)
assert_array_equal(res, tresult[real_index])
else:
res = ta.take(index_array, mode=mode, axis=1)
assert_(res.shape == (2,) + index_array.shape)
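    # Added minimal illustration of the three out-of-bounds modes exercised
    # above, on a concrete array:
    def test_take_modes_sketch(self):
        a = np.array([10, 20])
        assert_raises(IndexError, a.take, [2], mode='raise')
        # 'wrap' takes indices modulo the axis length:
        assert_array_equal(a.take([2, -1], mode='wrap'), [10, 20])
        # 'clip' clamps into range and disables negative indexing:
        assert_array_equal(a.take([2, -1], mode='clip'), [20, 10])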
def test_refcounting(self):
objects = [object() for i in range(10)]
for mode in ('raise', 'clip', 'wrap'):
a = np.array(objects)
b = np.array([2, 2, 4, 5, 3, 5])
a.take(b, out=a[:6], mode=mode)
del a
if HAS_REFCOUNT:
assert_(all(sys.getrefcount(o) == 3 for o in objects))
# not contiguous, example:
a = np.array(objects * 2)[::2]
a.take(b, out=a[:6], mode=mode)
del a
if HAS_REFCOUNT:
assert_(all(sys.getrefcount(o) == 3 for o in objects))
def test_unicode_mode(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.take, 5, mode=k)
def test_empty_partition(self):
# In reference to github issue #6530
a_original = np.array([0, 2, 4, 6, 8, 10])
a = a_original.copy()
# An empty partition should be a successful no-op
a.partition(np.array([], dtype=np.int16))
assert_array_equal(a, a_original)
def test_empty_argpartition(self):
# In reference to github issue #6530
a = np.array([0, 2, 4, 6, 8, 10])
a = a.argpartition(np.array([], dtype=np.int16))
b = np.array([0, 1, 2, 3, 4, 5])
assert_array_equal(a, b)
| 3,579 | Python | 40.149425 | 80 | 0.488963 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_protocols.py | import pytest
import warnings
import numpy as np
@pytest.mark.filterwarnings("error")
def test_getattr_warning():
# issue gh-14735: make sure we clear only getattr errors, and let warnings
# through
class Wrapper:
def __init__(self, array):
self.array = array
def __len__(self):
return len(self.array)
def __getitem__(self, item):
return type(self)(self.array[item])
def __getattr__(self, name):
if name.startswith("__array_"):
warnings.warn("object got converted", UserWarning, stacklevel=1)
return getattr(self.array, name)
def __repr__(self):
return "<Wrapper({self.array})>".format(self=self)
array = Wrapper(np.arange(10))
with pytest.raises(UserWarning, match="object got converted"):
np.asarray(array)
def test_array_called():
class Wrapper:
val = '0' * 100
def __array__(self, result=None):
return np.array([self.val], dtype=object)
wrapped = Wrapper()
arr = np.array(wrapped, dtype=str)
assert arr.dtype == 'U100'
assert arr[0] == Wrapper.val
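# Added hedged sketch of the dict-based `__array_interface__` protocol, a
# sibling of `__array__` above; numpy wraps the exporter's buffer without
# copying it.
def test_array_interface_dict_sketch():
    base = np.arange(4, dtype=np.float64)

    class Exporter:
        __array_interface__ = base.__array_interface__

    arr = np.asarray(Exporter())
    assert arr.dtype == np.float64
    assert np.shares_memory(arr, base)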
| 1,168 | Python | 24.977777 | 80 | 0.589041 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_indexing.py | import sys
import warnings
import functools
import operator
import pytest
import numpy as np
from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_warns, HAS_REFCOUNT,
)
class TestIndexing:
def test_index_no_floats(self):
a = np.array([[[5]]])
assert_raises(IndexError, lambda: a[0.0])
assert_raises(IndexError, lambda: a[0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0])
assert_raises(IndexError, lambda: a[0.0,:])
assert_raises(IndexError, lambda: a[:, 0.0])
assert_raises(IndexError, lambda: a[:, 0.0,:])
assert_raises(IndexError, lambda: a[0.0,:,:])
assert_raises(IndexError, lambda: a[0, 0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0, 0])
assert_raises(IndexError, lambda: a[0, 0.0, 0])
assert_raises(IndexError, lambda: a[-1.4])
assert_raises(IndexError, lambda: a[0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0])
assert_raises(IndexError, lambda: a[-1.4,:])
assert_raises(IndexError, lambda: a[:, -1.4])
assert_raises(IndexError, lambda: a[:, -1.4,:])
assert_raises(IndexError, lambda: a[-1.4,:,:])
assert_raises(IndexError, lambda: a[0, 0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0, 0])
assert_raises(IndexError, lambda: a[0, -1.4, 0])
assert_raises(IndexError, lambda: a[0.0:, 0.0])
assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
def test_slicing_no_floats(self):
a = np.array([[5]])
# start as float.
assert_raises(TypeError, lambda: a[0.0:])
assert_raises(TypeError, lambda: a[0:, 0.0:2])
assert_raises(TypeError, lambda: a[0.0::2, :0])
assert_raises(TypeError, lambda: a[0.0:1:2,:])
assert_raises(TypeError, lambda: a[:, 0.0:])
# stop as float.
assert_raises(TypeError, lambda: a[:0.0])
assert_raises(TypeError, lambda: a[:0, 1:2.0])
assert_raises(TypeError, lambda: a[:0.0:2, :0])
assert_raises(TypeError, lambda: a[:0.0,:])
assert_raises(TypeError, lambda: a[:, 0:4.0:2])
# step as float.
assert_raises(TypeError, lambda: a[::1.0])
assert_raises(TypeError, lambda: a[0:, :2:2.0])
assert_raises(TypeError, lambda: a[1::4.0, :0])
assert_raises(TypeError, lambda: a[::5.0,:])
assert_raises(TypeError, lambda: a[:, 0:4:2.0])
# mixed.
assert_raises(TypeError, lambda: a[1.0:2:2.0])
assert_raises(TypeError, lambda: a[1.0::2.0])
assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
        # a float step of 0.0 should still raise the TypeError.
assert_raises(TypeError, lambda: a[::0.0])
def test_index_no_array_to_index(self):
# No non-scalar arrays.
a = np.array([[[1]]])
assert_raises(TypeError, lambda: a[a:a:a])
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
assert_equal(a[None], a[np.newaxis])
assert_equal(a[None].ndim, a.ndim + 1)
def test_empty_tuple_index(self):
# Empty tuple index creates a view
a = np.array([1, 2, 3])
assert_equal(a[()], a)
assert_(a[()].base is a)
a = np.array(0)
assert_(isinstance(a[()], np.int_))
def test_void_scalar_empty_tuple(self):
s = np.zeros((), dtype='V4')
assert_equal(s[()].dtype, s.dtype)
assert_equal(s[()], s)
assert_equal(type(s[...]), np.ndarray)
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
index = np.arange(5)
u_index = index.astype(np.uintp)
arr = np.arange(10)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)
assert_array_equal(arr, np.arange(10))
arr = np.arange(10).reshape(5, 2)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)[:,None]
assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
arr = np.arange(25).reshape(5, 5)
assert_array_equal(arr[u_index, u_index], arr[index, index])
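    # Added illustration: same-kind casting admits the uintp -> intp cast
    # above, but a float index array is rejected outright rather than cast.
    def test_float_index_array_rejected_sketch(self):
        arr = np.arange(10)
        assert_raises(IndexError, arr.__getitem__, np.array([0.5]))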
def test_empty_fancy_index(self):
# Empty list index creates an empty array
# with the same dtype (but with weird shape)
a = np.array([1, 2, 3])
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([], dtype=np.intp)
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([])
assert_raises(IndexError, a.__getitem__, b)
def test_ellipsis_index(self):
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
assert_(a[...].base is a)
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
assert_equal(a[0, ...], a[0])
assert_equal(a[0, ...], a[0,:])
assert_equal(a[..., 0], a[:, 0])
# Slicing with ellipsis always results
# in an array, not a scalar
assert_equal(a[0, ..., 1], np.array(2))
# Assignment with `(Ellipsis,)` on 0-d arrays
b = np.array(1)
b[(Ellipsis,)] = 2
assert_equal(b, 2)
def test_single_int_index(self):
# Single integer index selects one row
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[0], [1, 2, 3])
assert_equal(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 30)
# Index overflow produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 64)
def test_single_bool_index(self):
# Single boolean index
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[np.array(True)], a[None])
assert_equal(a[np.array(False)], a[None][0:0])
def test_boolean_shape_mismatch(self):
arr = np.ones((5, 4, 3))
index = np.array([True])
assert_raises(IndexError, arr.__getitem__, index)
index = np.array([False] * 6)
assert_raises(IndexError, arr.__getitem__, index)
index = np.zeros((4, 4), dtype=bool)
assert_raises(IndexError, arr.__getitem__, index)
assert_raises(IndexError, arr.__getitem__, (slice(None), index))
def test_boolean_indexing_onedim(self):
# Indexing a 2-dimensional array with
# boolean array of length one
a = np.array([[ 0., 0., 0.]])
b = np.array([ True], dtype=bool)
assert_equal(a[b], a)
# boolean assignment
a[b] = 1.
assert_equal(a, [[1., 1., 1.]])
def test_boolean_assignment_value_mismatch(self):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
a = np.arange(4)
def f(a, v):
a[a > -1] = v
assert_raises(ValueError, f, a, [])
assert_raises(ValueError, f, a, [1, 2, 3])
assert_raises(ValueError, f, a[:1], [1, 2, 3])
def test_boolean_assignment_needs_api(self):
# See also gh-7666
# This caused a segfault on Python 2 due to the GIL not being
# held when the iterator does not need it, but the transfer function
# does
arr = np.zeros(1000)
indx = np.zeros(1000, dtype=bool)
indx[:100] = True
arr[indx] = np.ones(100, dtype=object)
expected = np.zeros(1000)
expected[:100] = 1
assert_array_equal(arr, expected)
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = np.array([[ True, False, True],
[False, True, False],
[ True, False, True]])
assert_equal(a[b], [1, 3, 5, 7, 9])
assert_equal(a[b[1]], [[4, 5, 6]])
assert_equal(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
assert_equal(a, [[0, 2, 0],
[4, 0, 6],
[0, 8, 0]])
def test_boolean_indexing_list(self):
# Regression test for #13715. It's a use-after-free bug which the
# test won't directly catch, but it will show up in valgrind.
a = np.array([1, 2, 3])
b = [True, False, True]
# Two variants of the test because the first takes a fast path
assert_equal(a[b], [1, 3])
assert_equal(a[None, b], [[1, 3]])
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
a = np.ones(5)
b = np.zeros(5, dtype=np.intp)[::-1]
c = np.arange(5)[::-1]
a[b] = c
# If the strides are not reversed, the 0 in the arange comes last.
assert_equal(a[0], 0)
# This also tests that the subspace buffer is initialized:
a = np.ones((5, 2))
c = np.arange(10).reshape(5, 2)[::-1]
a[b, :] = c
assert_equal(a[0], [0, 1])
def test_reversed_strides_result_allocation(self):
# Test a bug when calculating the output strides for a result array
# when the subspace size was 1 (and test other cases as well)
a = np.arange(10)[:, None]
i = np.arange(10)[::-1]
assert_array_equal(a[i], a[i.copy('C')])
a = np.arange(20).reshape(-1, 2)
def test_uncontiguous_subspace_assignment(self):
# During development there was a bug activating a skip logic
# based on ndim instead of size.
a = np.full((3, 4, 2), -1)
b = np.full((3, 4, 2), -1)
a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
assert_equal(a, b)
def test_too_many_fancy_indices_special_case(self):
        # Just documents behaviour; this is a small limitation.
a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
def test_scalar_array_bool(self):
        # NumPy bools can be used as boolean index (Python bools cannot, as of yet)
a = np.array(1)
assert_equal(a[np.bool_(True)], a[np.array(True)])
assert_equal(a[np.bool_(False)], a[np.array(False)])
# After deprecating bools as integers:
#a = np.array([0,1,2])
#assert_equal(a[True, :], a[None, :])
#assert_equal(a[:, True], a[:, None])
#
#assert_(not np.may_share_memory(a, a[True, :]))
def test_everything_returns_views(self):
        # Previously, indexing with `...` would return `a` itself.
a = np.arange(5)
assert_(a is not a[()])
assert_(a is not a[...])
assert_(a is not a[:])
def test_broaderrors_indexing(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
def test_trivial_fancy_out_of_bounds(self):
a = np.zeros(5)
ind = np.ones(20, dtype=np.intp)
ind[-1] = 10
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
ind = np.ones(20, dtype=np.intp)
ind[0] = 11
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
def test_trivial_fancy_not_possible(self):
# Test that the fast path for trivial assignment is not incorrectly
# used when the index is not contiguous or 1D, see also gh-11467.
a = np.arange(6)
idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
assert_array_equal(a[idx], idx)
        # this case must not go into the fast path; note that idx is
        # a non-contiguous, non-1D array here.
a[idx] = -1
res = np.arange(6)
res[0] = -1
res[3] = -1
assert_array_equal(a, res)
def test_nonbaseclass_values(self):
class SubClass(np.ndarray):
def __array_finalize__(self, old):
# Have array finalize do funny things
self.fill(99)
a = np.zeros((5, 5))
s = a.copy().view(type=SubClass)
s.fill(1)
a[[0, 1, 2, 3, 4], :] = s
assert_((a == 1).all())
# Subspace is last, so transposing might want to finalize
a[:, [0, 1, 2, 3, 4]] = s
assert_((a == 1).all())
a.fill(0)
a[...] = s
assert_((a == 1).all())
def test_array_like_values(self):
# Similar to the above test, but use a memoryview instead
a = np.zeros((5, 5))
s = np.arange(25, dtype=np.float64).reshape(5, 5)
a[[0, 1, 2, 3, 4], :] = memoryview(s)
assert_array_equal(a, s)
a[:, [0, 1, 2, 3, 4]] = memoryview(s)
assert_array_equal(a, s)
a[...] = memoryview(s)
assert_array_equal(a, s)
def test_subclass_writeable(self):
d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
dtype=[('target', 'S20'), ('V_mag', '>f4')])
ind = np.array([False, True, True], dtype=bool)
assert_(d[ind].flags.writeable)
ind = np.array([0, 1])
assert_(d[ind].flags.writeable)
assert_(d[...].flags.writeable)
assert_(d[0].flags.writeable)
def test_memory_order(self):
# This is not necessary to preserve. Memory layouts for
# more complex indices are not as simple.
a = np.arange(10)
b = np.arange(10).reshape(5,2).T
assert_(a[b].flags.f_contiguous)
# Takes a different implementation branch:
a = a.reshape(-1, 1)
assert_(a[b, 0].flags.f_contiguous)
def test_scalar_return_type(self):
# Full scalar indices should return scalars and object
# arrays should not call PyArray_Return on their items
class Zero:
# The most basic valid indexing
def __index__(self):
return 0
z = Zero()
class ArrayLike:
# Simple array, should behave like the array
def __array__(self):
return np.array(0)
a = np.zeros(())
assert_(isinstance(a[()], np.float_))
a = np.zeros(1)
assert_(isinstance(a[z], np.float_))
a = np.zeros((1, 1))
assert_(isinstance(a[z, np.array(0)], np.float_))
assert_(isinstance(a[z, ArrayLike()], np.float_))
# And object arrays do not call it too often:
b = np.array(0)
a = np.array(0, dtype=object)
a[()] = b
assert_(isinstance(a[()], np.ndarray))
a = np.array([b, None])
assert_(isinstance(a[z], np.ndarray))
a = np.array([[b, None]])
assert_(isinstance(a[z, np.array(0)], np.ndarray))
assert_(isinstance(a[z, ArrayLike()], np.ndarray))
def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
if HAS_REFCOUNT:
refcount = sys.getrefcount(np.dtype(np.intp))
# item setting always checks indices in separate function:
a[np.array([0], dtype=np.intp)] = 1
a[np.array([0], dtype=np.uint8)] = 1
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.intp), 1)
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.uint8), 1)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
def test_unaligned(self):
v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
d = v.view(np.dtype("S8"))
# unaligned source
x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
x = x.view(np.dtype("S8"))
x[...] = np.array("b" * 8, dtype="S")
b = np.arange(d.size)
#trivial
assert_equal(d[b], d)
d[b] = x
# nontrivial
# unaligned index array
b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
b = b.view(np.intp)[:d.size]
b[...] = np.arange(d.size)
assert_equal(d[b.astype(np.int16)], d)
d[b.astype(np.int16)] = x
# boolean
d[b % 2 == 0]
d[b % 2 == 0] = x[::2]
def test_tuple_subclass(self):
arr = np.ones((5, 5))
# A tuple subclass should also be an nd-index
class TupleSubclass(tuple):
pass
index = ([1], [1])
index = TupleSubclass(index)
assert_(arr[index].shape == (1,))
# Unlike the non nd-index:
assert_(arr[index,].shape != (1,))
def test_broken_sequence_not_nd_index(self):
# See gh-5063:
# If we have an object which claims to be a sequence, but fails
# on item getting, this should not be converted to an nd-index (tuple)
# If this object happens to be a valid index otherwise, it should work
# This object here is very dubious and probably bad though:
class SequenceLike:
def __index__(self):
return 0
def __len__(self):
return 1
def __getitem__(self, item):
raise IndexError('Not possible')
arr = np.arange(10)
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
# also test that field indexing does not segfault
# for a similar reason, by indexing a structured array
arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
def test_indexing_array_weird_strides(self):
# See also gh-6221
# the shapes used here come from the issue and create the correct
# size for the iterator buffering size.
x = np.ones(10)
x2 = np.ones((10, 2))
ind = np.arange(10)[:, None, None, None]
ind = np.broadcast_to(ind, (10, 55, 4, 4))
# single advanced index case
assert_array_equal(x[ind], x[ind.copy()])
# higher dimensional advanced index
zind = np.zeros(4, dtype=np.intp)
assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
def test_indexing_array_negative_strides(self):
# From gh-8264,
# core dumps if negative strides are used in iteration
arro = np.zeros((4, 4))
arr = arro[::-1, ::-1]
slices = (slice(None), [0, 1, 2, 3])
arr[slices] = 10
assert_array_equal(arr, 10.)
def test_character_assignment(self):
# This is an example a function going through CopyObject which
# used to have an untested special path for scalars
# (the character special dtype case, should be deprecated probably)
arr = np.zeros((1, 5), dtype="c")
arr[0] = np.str_("asdfg") # must assign as a sequence
assert_array_equal(arr[0], np.array("asdfg", dtype="c"))
assert arr[0, 1] == b"s" # make sure not all were set to "a" for both
@pytest.mark.parametrize("index",
[True, False, np.array([0])])
@pytest.mark.parametrize("num", [32, 40])
@pytest.mark.parametrize("original_ndim", [1, 32])
def test_too_many_advanced_indices(self, index, num, original_ndim):
# These are limitations based on the number of arguments we can process.
        # For `num=32` (and all boolean cases), the result is actually defined;
# but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
arr = np.ones((1,) * original_ndim)
with pytest.raises(IndexError):
arr[(index,) * num]
with pytest.raises(IndexError):
arr[(index,) * num] = 1.
def test_structured_advanced_indexing(self):
# Test that copyswap(n) used by integer array indexing is threadsafe
# for structured datatypes, see gh-15387. This test can behave randomly.
from concurrent.futures import ThreadPoolExecutor
# Create a deeply nested dtype to make a failure more likely:
dt = np.dtype([("", "f8")])
dt = np.dtype([("", dt)] * 2)
dt = np.dtype([("", dt)] * 2)
# The array should be large enough to likely run into threading issues
arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0]
rng = np.random.default_rng()
def func(arr):
indx = rng.integers(0, len(arr), size=6000, dtype=np.intp)
arr[indx]
tpe = ThreadPoolExecutor(max_workers=8)
futures = [tpe.submit(func, arr) for _ in range(10)]
for f in futures:
f.result()
assert arr.dtype is dt
def test_nontuple_ndindex(self):
a = np.arange(25).reshape((5, 5))
assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
assert_raises(IndexError, a.__getitem__, [slice(None)])
class TestFieldIndexing:
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
a = np.zeros((), [('a','f8')])
assert_(isinstance(a['a'], np.ndarray))
assert_(isinstance(a[['a']], np.ndarray))
class TestBroadcastedAssignments:
def assign(self, a, ind, val):
a[ind] = val
return a
def test_prepending_ones(self):
a = np.zeros((3, 2))
a[...] = np.ones((1, 3, 2))
# Fancy with subspace with and without transpose
a[[0, 1, 2], :] = np.ones((1, 3, 2))
a[:, [0, 1]] = np.ones((1, 3, 2))
# Fancy without subspace (with broadcasting)
a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
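    # Added minimal sketch of the rule above: leading 1-sized dimensions on
    # the assigned value broadcast away, so a (1, 1, 2) value fits a (2,)
    # destination.
    def test_prepending_ones_sketch(self):
        a = np.zeros(2)
        a[...] = np.ones((1, 1, 2))
        assert_array_equal(a, [1., 1.])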
def test_prepend_not_one(self):
assign = self.assign
s_ = np.s_
a = np.zeros(5)
# Too large and not only ones.
assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
def test_simple_broadcasting_errors(self):
assign = self.assign
s_ = np.s_
a = np.zeros((5, 1))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
@pytest.mark.parametrize("index", [
(..., [1, 2], slice(None)),
([0, 1], ..., 0),
(..., [1, 2], [1, 2])])
def test_broadcast_error_reports_correct_shape(self, index):
values = np.zeros((100, 100)) # will never broadcast below
arr = np.zeros((3, 4, 5, 6, 7))
# We currently report without any spaces (could be changed)
shape_str = str(arr[index].shape).replace(" ", "")
with pytest.raises(ValueError) as e:
arr[index] = values
assert str(e.value).endswith(shape_str)
def test_index_is_larger(self):
# Simple case of fancy index broadcasting of the index.
a = np.zeros((5, 5))
a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
assert_((a[:3, :3] == [2, 3, 4]).all())
def test_broadcast_subspace(self):
a = np.zeros((100, 100))
v = np.arange(100)[:,None]
b = np.arange(100)[::-1]
a[b] = v
assert_((a[::-1] == v).all())
class TestSubclasses:
def test_basic(self):
# Test that indexing in various ways produces SubClass instances,
# and that the base is set up correctly: the original subclass
# instance for views, and a new ndarray for advanced/boolean indexing
# where a copy was made (latter a regression test for gh-11983).
class SubClass(np.ndarray):
pass
a = np.arange(5)
s = a.view(SubClass)
s_slice = s[:3]
assert_(type(s_slice) is SubClass)
assert_(s_slice.base is s)
assert_array_equal(s_slice, a[:3])
s_fancy = s[[0, 1, 2]]
assert_(type(s_fancy) is SubClass)
assert_(s_fancy.base is not s)
assert_(type(s_fancy.base) is np.ndarray)
assert_array_equal(s_fancy, a[[0, 1, 2]])
assert_array_equal(s_fancy.base, a[[0, 1, 2]])
s_bool = s[s > 0]
assert_(type(s_bool) is SubClass)
assert_(s_bool.base is not s)
assert_(type(s_bool.base) is np.ndarray)
assert_array_equal(s_bool, a[a > 0])
assert_array_equal(s_bool.base, a[a > 0])
def test_fancy_on_read_only(self):
# Test that fancy indexing on read-only SubClass does not make a
# read-only copy (gh-14132)
class SubClass(np.ndarray):
pass
a = np.arange(5)
s = a.view(SubClass)
s.flags.writeable = False
s_fancy = s[[0, 1, 2]]
assert_(s_fancy.flags.writeable)
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
def __array_finalize__(self, old):
self.finalize_status = np.array(self)
self.old = old
s = np.arange(10).view(SubClass)
new_s = s[:3]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[[0,1,2,3]]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[s > 0]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
class TestFancyIndexingCast:
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
bool_index = np.zeros(shape).astype(bool)
bool_index[0, 1] = True
zero_array = np.zeros(shape)
# Assigning float is fine.
zero_array[bool_index] = np.array([1])
assert_equal(zero_array[0, 1], 1)
# Fancy indexing works, although we get a cast warning.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
assert_equal(zero_array[0, 1], 2) # No complex part
# Cast complex to float, throwing away the imaginary portion.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
class TestFancyIndexingEquivalence:
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
a = np.arange(5, dtype=object)
b = a.copy()
a[:3] = [1, (1,2), 3]
b[[0, 1, 2]] = [1, (1,2), 3]
assert_array_equal(a, b)
# test same for subspace fancy indexing
b = np.arange(5, dtype=object)[None, :]
b[[0], :3] = [[1, (1,2), 3]]
assert_array_equal(a, b[0])
# Check that swapping of axes works.
# There was a bug that made the later assignment throw a ValueError
        # due to an incorrectly transposed temporary right-hand side (gh-5714)
b = b.T
b[:3, [0]] = [[1], [(1,2)], [3]]
assert_array_equal(a, b[:, 0])
# Another test for the memory order of the subspace
arr = np.ones((3, 4, 5), dtype=object)
# Equivalent slicing assignment for comparison
cmp_arr = arr.copy()
cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
arr = arr.copy('F')
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
def test_cast_equivalence(self):
# Yes, normal slicing uses unsafe casting.
a = np.arange(5)
b = a.copy()
a[:3] = np.array(['2', '-3', '-1'])
b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
assert_array_equal(a, b)
# test the same for subspace fancy indexing
b = np.arange(5)[None, :]
b[[0], :3] = np.array([['2', '-3', '-1']])
assert_array_equal(a, b[0])
class TestMultiIndexingAutomated:
"""
These tests use code to mimic the C-Code indexing for selection.
NOTE:
* This still lacks tests for complex item setting.
* If you change behavior of indexing, you might want to modify
these tests to try more combinations.
* Behavior was written to match numpy version 1.8. (though a
first version matched 1.7.)
* Only tuple indices are supported by the mimicking code.
(and tested as of writing this)
* Error types should match most of the time as long as there
is only one error. For multiple errors, what gets raised
will usually not be the same one. They are *not* tested.
Update 2016-11-30: It is probably not worth maintaining this test
indefinitely and it can be dropped if maintenance becomes a burden.
"""
def setup_method(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = ['skip', Ellipsis,
0,
# Boolean indices, up to 3-d for some special cases of eating up
            # dimensions; also need to test all False
np.array([True, False, False]),
np.array([[True, False], [False, True]]),
np.array([[[False, False], [False, False]]]),
# Some slices:
slice(-5, 5, 2),
slice(1, 1, 100),
slice(4, -1, -2),
slice(None, None, -3),
# Some Fancy indexes:
np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
np.array([2, -1], dtype=np.int8),
np.zeros([1]*31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
# Some simpler indices that still cover a bit more
self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
'skip']
# Very simple ones to fill the rest:
self.fill_indices = [slice(None, None), 0]
def _get_multi_index(self, arr, indices):
"""Mimic multi dimensional indexing.
Parameters
----------
arr : ndarray
Array to be indexed.
indices : tuple of index objects
Returns
-------
out : ndarray
An array equivalent to the indexing operation (but always a copy).
`arr[indices]` should be identical.
no_copy : bool
Whether the indexing operation requires a copy. If this is `True`,
`np.may_share_memory(arr, arr[indices])` should be `True` (with
some exceptions for scalars and possibly 0-d arrays).
Notes
-----
        While this function mostly matches the errors raised by normal
        indexing, an exact match is not guaranteed.
"""
in_indices = list(indices)
indices = []
# if False, this is a fancy or boolean index
no_copy = True
# number of fancy/scalar indexes that are not consecutive
num_fancy = 0
# number of dimensions indexed by a "fancy" index
fancy_dim = 0
# NOTE: This is a funny twist (and probably OK to change).
# The boolean array has illegal indexes, but this is
# allowed if the broadcast fancy-indices are 0-sized.
# This variable is to catch that case.
error_unless_broadcast_to_empty = False
# We need to handle Ellipsis and make arrays from indices, also
# check if this is fancy indexing (set no_copy).
ndim = 0
ellipsis_pos = None # define here mostly to replace all but first.
for i, indx in enumerate(in_indices):
if indx is None:
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
no_copy = False
if indx.ndim == 0:
raise IndexError
# boolean indices can have higher dimensions
ndim += indx.ndim
fancy_dim += indx.ndim
continue
if indx is Ellipsis:
if ellipsis_pos is None:
ellipsis_pos = i
continue # do not increment ndim counter
raise IndexError
if isinstance(indx, slice):
ndim += 1
continue
if not isinstance(indx, np.ndarray):
# This could be open for changes in numpy.
# numpy should maybe raise an error if casting to intp
# is not safe. It rejects np.array([1., 2.]) but not
                # [1., 2.] as index (same for e.g. np.take).
# (Note the importance of empty lists if changing this here)
try:
indx = np.array(indx, dtype=np.intp)
except ValueError:
raise IndexError
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
raise IndexError('arrays used as indices must be of '
'integer (or boolean) type')
if indx.ndim != 0:
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
            # We can't take more dimensions than we have, not even for 0-d
            # arrays, since a[()] makes sense but a[(),] does not. We will
# raise an error later on, unless a broadcasting error occurs
# first.
raise IndexError
if ndim == 0 and None not in in_indices:
# Well we have no indexes or one Ellipsis. This is legal.
return arr.copy(), no_copy
if ellipsis_pos is not None:
in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
(arr.ndim - ndim))
for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
# convert to an index array
indx = np.arange(*indx.indices(arr.shape[ax]))
indices.append(['s', indx])
continue
elif indx is None:
# this is like taking a slice with one element from a new axis:
indices.append(['n', np.array([0], dtype=np.intp)])
arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
if indx.shape != arr.shape[ax:ax+indx.ndim]:
raise IndexError
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax+indx.ndim], mode='raise')
except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
# concatenate axis into a single one:
if indx.ndim != 0:
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(arr.shape[ax:ax+indx.ndim]),)
+ arr.shape[ax+indx.ndim:]))
indx = flat_indx
else:
# This could be changed, a 0-d boolean index can
# make sense (even outside the 0-d indexed array case)
                # Note that originally this could be interpreted as an
                # integer in the full integer special case.
raise IndexError
else:
# If the index is a singleton, the bounds check is done
# before the broadcasting. This used to be different in <1.9
if indx.ndim == 0:
if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
raise IndexError
if indx.ndim == 0:
                # The index is a scalar. This used to be twofold, but if
# fancy indexing was active, the check was done later,
# possibly after broadcasting it away (1.7. or earlier).
# Now it is always done.
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
raise IndexError
if (len(indices) > 0 and
indices[-1][0] == 'f' and
ax != ellipsis_pos):
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx)
else:
# We have a fancy index that is not after an existing one.
# NOTE: A 0-d array triggers this as well, while one may
# expect it to not trigger it, since a scalar would not be
# considered fancy indexing.
num_fancy += 1
indices.append(['f', indx])
if num_fancy > 1 and not no_copy:
# We have to flush the fancy indexes left
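            # This mimics NumPy's rule: when fancy indices are separated by
            # a slice (or similar), their broadcast dimensions are moved to
            # the front of the result rather than staying in place.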
new_indices = indices[:]
axes = list(range(arr.ndim))
fancy_axes = []
new_indices.insert(0, ['f'])
ni = 0
ai = 0
for indx in indices:
ni += 1
if indx[0] == 'f':
new_indices[0].extend(indx[1:])
del new_indices[ni]
ni -= 1
for ax in range(ai, ai + len(indx[1:])):
fancy_axes.append(ax)
axes.remove(ax)
ai += len(indx) - 1 # axis we are at
indices = new_indices
# and now we need to transpose arr:
arr = arr.transpose(*(fancy_axes + axes))
# We only have one 'f' index now and arr is transposed accordingly.
# Now handle newaxis by reshaping...
ax = 0
for indx in indices:
if indx[0] == 'f':
if len(indx) == 1:
continue
# First of all, reshape arr to combine fancy axes into one:
orig_shape = arr.shape
orig_slice = orig_shape[ax:ax + len(indx[1:])]
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(orig_slice).astype(int),)
+ arr.shape[ax + len(indx[1:]):]))
# Check if broadcasting works
res = np.broadcast(*indx[1:])
                # Unfortunately the indices might be out of bounds, so check
                # that first, and only then use mode='wrap'. However, do this
                # only if there are any indices...
if res.size != 0:
if error_unless_broadcast_to_empty:
raise IndexError
for _indx, _size in zip(indx[1:], orig_slice):
if _indx.size == 0:
continue
if np.any(_indx >= _size) or np.any(_indx < -_size):
raise IndexError
if len(indx[1:]) == len(orig_slice):
                    if np.prod(orig_slice) == 0:
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='raise')
except Exception:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
else:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='wrap')
else:
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
try:
arr = arr.reshape((arr.shape[:ax]
+ mi.shape
+ arr.shape[ax+1:]))
except ValueError:
# too many dimensions, probably
raise IndexError
ax += mi.ndim
continue
# If we are here, we have a 1D array for take:
arr = arr.take(indx[1], axis=ax)
ax += 1
return arr, no_copy
def _check_multi_index(self, arr, index):
"""Check a multi index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be a reshaped arange.
index : tuple of indexing objects
Index being tested.
"""
# Test item getting
try:
mimic_get, no_copy = self._get_multi_index(arr, index)
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
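            # A failing get or set must not leak a reference to `arr`: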
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _check_single_index(self, arr, index):
"""Check a single index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be an arange.
index : indexing object
Index being tested. Must be a single index and not a tuple
of indexing objects (see also `_check_multi_index`).
"""
try:
mimic_get, no_copy = self._get_multi_index(arr, (index,))
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _compare_index_result(self, arr, index, mimic_get, no_copy):
"""Compare mimicked result to indexing result.
"""
arr = arr.copy()
indexed_arr = arr[index]
assert_array_equal(indexed_arr, mimic_get)
        # Check if we got a view, unless it's a 0-sized or 0-d array
        # (then it's not a view, and that does not matter).
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
# Check reference count of the original array
if HAS_REFCOUNT:
if no_copy:
# refcount increases by one:
assert_equal(sys.getrefcount(arr), 3)
else:
assert_equal(sys.getrefcount(arr), 2)
# Test non-broadcast setitem:
b = arr.copy()
b[index] = mimic_get + 1000
if b.size == 0:
return # nothing to compare here...
if no_copy and indexed_arr.ndim != 0:
# change indexed_arr in-place to manipulate original:
indexed_arr += 1000
assert_array_equal(arr, b)
return
# Use the fact that the array is originally an arange:
arr.flat[indexed_arr.ravel()] += 1000
assert_array_equal(arr, b)
def test_boolean(self):
a = np.array(5)
assert_equal(a[np.array(True)], 5)
a[np.array(True)] = 1
assert_equal(a, 1)
        # NOTE: This is different from normal broadcasting, as
        # arr[boolean_array] works like a multi-index, which means it is
        # aligned to the left. This is probably correct for consistency
        # with arr[boolean_array,]; also, no broadcasting is done at all.
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool),))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
# spot and the simple ones in one other spot.
with warnings.catch_warnings():
# This is so that np.array(True) is not accepted in a full integer
# index, when running the file separately.
warnings.filterwarnings('error', '', DeprecationWarning)
warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
def isskip(idx):
return isinstance(idx, str) and idx == "skip"
for simple_pos in [0, 2, 3]:
tocheck = [self.fill_indices, self.complex_indices,
self.fill_indices, self.fill_indices]
tocheck[simple_pos] = self.simple_indices
for index in product(*tocheck):
index = tuple(i for i in index if not isskip(i))
self._check_multi_index(self.a, index)
self._check_multi_index(self.b, index)
# Check very simple item getting:
self._check_multi_index(self.a, (0, 0, 0, 0))
self._check_multi_index(self.b, (0, 0, 0, 0))
# Also check (simple cases of) too many indices:
assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
def test_1d(self):
a = np.arange(10)
for index in self.complex_indices:
self._check_single_index(a, index)
class TestFloatNonIntegerArgument:
"""
    These test that ``TypeError`` is raised when you try to use
    non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]``
    and ``a[0.5]``, or in other functions like ``array.reshape(1., -1)``.
"""
def test_valid_indexing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[np.array([0])]
a[[0, 0]]
a[:, [0, 0]]
a[:, 0,:]
a[:,:,:]
def test_valid_slicing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[::]
a[0:]
a[:2]
a[0:2]
a[::2]
a[1::2]
a[:2:2]
a[1:2:2]
def test_non_integer_argument_errors(self):
a = np.array([[5]])
assert_raises(TypeError, np.reshape, a, (1., 1., -1))
assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
assert_raises(TypeError, np.take, a, [0], 1.)
assert_raises(TypeError, np.take, a, [0], np.float64(1.))
def test_non_integer_sequence_multiplication(self):
# NumPy scalar sequence multiply should not work with non-integers
def mult(a, b):
return a * b
assert_raises(TypeError, mult, [1], np.float_(3))
# following should be OK
mult([1], np.int_(3))
def test_reduce_axis_float_index(self):
d = np.zeros((3,3,3))
assert_raises(TypeError, np.min, d, 0.5)
assert_raises(TypeError, np.min, d, (0.5, 1))
assert_raises(TypeError, np.min, d, (1, 2.2))
assert_raises(TypeError, np.min, d, (.2, 1.2))
class TestBooleanIndexing:
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
assert_raises(TypeError, np.reshape, a, (True, -1))
assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
# Note that operator.index(np.array(True)) does not work, a boolean
# array is thus also deprecated, but not with the same message:
assert_raises(TypeError, operator.index, np.array(True))
assert_warns(DeprecationWarning, operator.index, np.True_)
assert_raises(TypeError, np.take, args=(a, [0], False))
def test_boolean_indexing_weirdness(self):
# Weird boolean indexing things
a = np.ones((2, 3, 4))
        assert a[False, True, ...].shape == (0, 2, 3, 4)
        assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2)
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
def test_boolean_indexing_fast_path(self):
# These used to either give the wrong error, or incorrectly give no
# error.
a = np.ones((3, 3))
# This used to incorrectly work (and give an array of shape (0,))
idx1 = np.array([[False]*9])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along dimension 0; "
"dimension is 3 but corresponding boolean dimension is 1",
lambda: a[idx1])
# This used to incorrectly give a ValueError: operands could not be broadcast together
idx2 = np.array([[False]*8 + [True]])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along dimension 0; "
"dimension is 3 but corresponding boolean dimension is 1",
lambda: a[idx2])
# This is the same as it used to be. The above two should work like this.
idx3 = np.array([[False]*10])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along dimension 0; "
"dimension is 3 but corresponding boolean dimension is 1",
lambda: a[idx3])
# This used to give ValueError: non-broadcastable operand
a = np.ones((1, 1, 2))
idx = np.array([[[True], [False]]])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along dimension 1; "
"dimension is 1 but corresponding boolean dimension is 2",
lambda: a[idx])
class TestArrayToIndexDeprecation:
"""Creating an index from array not 0-D is an error.
"""
def test_array_to_index_error(self):
        # Only 0-d arrays can be converted to an index; every call below
        # uses a non-0-d array and must therefore raise.
a = np.array([[[1]]])
assert_raises(TypeError, operator.index, np.array([1]))
assert_raises(TypeError, np.reshape, a, (a, -1))
assert_raises(TypeError, np.take, a, [0], a)
class TestNonIntegerArrayLike:
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
an integer.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
assert_raises(IndexError, a.__getitem__, (['1', '2'],))
# The following is valid
a.__getitem__([])
class TestMultipleEllipsisError:
"""An index can only have a single ellipsis.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, lambda: a[..., ...])
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
class TestCApiAccess:
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
# 0-d arrays don't work:
assert_raises(IndexError, subscript, np.ones(()), 0)
# Out of bound values:
assert_raises(IndexError, subscript, np.ones(10), 11)
assert_raises(IndexError, subscript, np.ones(10), -11)
assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
a = np.arange(10)
assert_array_equal(a[4], subscript(a, 4))
a = a.reshape(5, 2)
assert_array_equal(a[-4], subscript(a, -4))
def test_setitem(self):
assign = functools.partial(array_indexing, 1)
# Deletion is impossible:
assert_raises(ValueError, assign, np.ones(10), 0)
# 0-d arrays don't work:
assert_raises(IndexError, assign, np.ones(()), 0, 0)
# Out of bound values:
assert_raises(IndexError, assign, np.ones(10), 11, 0)
assert_raises(IndexError, assign, np.ones(10), -11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
a = np.arange(10)
assign(a, 4, 10)
assert_(a[4] == 10)
a = a.reshape(5, 2)
assign(a, 4, 10)
assert_array_equal(a[-1], [10, 10])
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/examples/limited_api/setup.py
"""
Build an example package using the limited Python C API.
"""
import numpy as np
from setuptools import setup, Extension
import os
macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")]
limited_api = Extension(
"limited_api",
sources=[os.path.join('.', "limited_api.c")],
include_dirs=[np.get_include()],
define_macros=macros,
)
extensions = [limited_api]
setup(
ext_modules=extensions
)
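# Illustrative usage (an assumption, not part of the original file): the test
# suite builds this extension before importing it, e.g. with something like
#     python setup.py build_ext --inplace
# so that `import limited_api` exercises the limited C API surface.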
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/examples/cython/setup.py
"""
Provide python-space access to the functions exposed in numpy/__init__.pxd
for testing.
"""
import numpy as np
from distutils.core import setup
from Cython.Build import cythonize
from setuptools.extension import Extension
import os
macros = [("NPY_NO_DEPRECATED_API", 0)]
checks = Extension(
"checks",
sources=[os.path.join('.', "checks.pyx")],
include_dirs=[np.get_include()],
define_macros=macros,
)
extensions = [checks]
setup(
ext_modules=cythonize(extensions)
)
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/__init__.py
"""
Compatibility module.
This module contains duplicated code from Python itself or 3rd party
extensions, which may be included for the following reasons:
* compatibility
* we may only need a small subset of the copied library/module
"""
from . import _inspect
from . import _pep440
from . import py3k
from ._inspect import getargspec, formatargspec
from .py3k import *
__all__ = []
__all__.extend(_inspect.__all__)
__all__.extend(py3k.__all__)
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/_pep440.py
"""Utility to compare pep440 compatible version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
# Copyright (c) Donald Stufft and individual contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import collections
import itertools
import re
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
]
# BEGIN packaging/_structures.py
class Infinity:
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity:
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
# BEGIN packaging/version.py
NegativeInfinity = NegativeInfinity()
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
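# A minimal usage sketch (illustrative):
#   >>> parse("1.21.0") > parse("1.9.3")
#   True
#   >>> parse("1.21.0rc1").is_prerelease
#   True
# Strings that are not valid PEP 440 versions fall back to LegacyVersion,
# which sorts before every PEP 440 Version (see _legacy_cmpkey below).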
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion:
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
    # epoch greater than or equal to 0. This effectively sorts LegacyVersion,
    # which uses the de facto standard originally implemented by setuptools,
    # before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version of setuptools,
    # prior to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
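# For example, _legacy_cmpkey("1.0") == (-1, ("00000001", "*final")):
# numeric parts are zero-padded for string comparison and trailing zero
# parts are dropped, so "1" and "1.0" compare equal.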
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We assume there is an implicit 0 in a pre-release if there is
# no numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower-case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume that if we are given a number but not given a letter,
# then this is using the implicit post release syntax (e.g., 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of
    # the trailing zeros removed. So we'll reverse the list, drop all the
    # now-leading zeros until we come to something non-zero, then take the
    # rest, re-reverse it back into the correct order, and use that tuple
    # as our sorting key.
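    # For example, (1, 0, 0) becomes (1,), so Version("1.0") == Version("1.0.0").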
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre-segment, but we _only_ want to do this
    # if there is neither a pre- nor a post-segment. If we have one of those, then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post-segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alphanumeric segments sort before numeric segments
# - Alphanumeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/_inspect.py
"""Subset of inspect module from upstream python
We use this instead of upstream because upstream inspect is slow to import, and
significantly contributes to numpy import times. Importing this copy has almost
no overhead.
"""
import types
__all__ = ['getargspec', 'formatargspec']
# ----------------------------------------------------------- type-checking
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
im_class class object in which this method belongs
im_func function object containing implementation of method
im_self instance to which this method is bound, or None
"""
return isinstance(object, types.MethodType)
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
func_code code object containing compiled function bytecode
func_defaults tuple of any default values for arguments
func_doc (same as __doc__)
func_globals global namespace in which this function was defined
func_name (same as __name__)
"""
return isinstance(object, types.FunctionType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables
"""
return isinstance(object, types.CodeType)
# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None.
"""
if not iscode(co):
raise TypeError('arg is not a code object')
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
    # The following acrobatics are for anonymous (tuple) arguments,
    # which we do not need to support, so they were removed to avoid
    # importing the dis module.
for i in range(nargs):
if args[i][:1] in ['', '.']:
raise TypeError("tuple function arguments are not supported")
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, varkw
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError('arg is not a Python function')
args, varargs, varkw = getargs(func.__code__)
return args, varargs, varkw, func.__defaults__
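# Example (illustrative):
#   >>> def f(a, b=1, *args, **kw): pass
#   >>> getargspec(f)
#   (['a', 'b'], 'args', 'kw', (1,))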
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame.
"""
args, varargs, varkw = getargs(frame.f_code)
return args, varargs, varkw, frame.f_locals
def joinseq(seq):
if len(seq) == 1:
return '(' + seq[0] + ',)'
else:
return '(' + ', '.join(seq) + ')'
def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element.
"""
if type(object) in [list, tuple]:
return join([strseq(_o, convert, join) for _o in object])
else:
return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargspec.
The first four arguments are (args, varargs, varkw, defaults). The
other four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
"""
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i in range(len(args)):
spec = strseq(args[i], formatarg, join)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(varargs))
if varkw is not None:
specs.append(formatvarkw(varkw))
return '(' + ', '.join(specs) + ')'
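# For the example function f above, formatargspec(*getargspec(f)) yields
# "(a, b=1, *args, **kw)".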
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
"""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = [strseq(arg, convert, join) for arg in args]
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/py3k.py
"""
Python 3.X compatibility tools.
While this file was originally intended for Python 2 -> 3 transition,
it is now used to create a compatibility layer between different
minor versions of Python 3.
While the active version of numpy may not support a given version of python, we
allow downstream libraries to continue to use these shims for forward
compatibility with numpy while they transition their code to newer versions of
Python.
"""
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
import sys
import os
from pathlib import Path
import io
try:
import pickle5 as pickle
except ImportError:
import pickle
long = int
integer_types = (int,)
basestring = str
unicode = str
bytes = bytes
def asunicode(s):
if isinstance(s, bytes):
return s.decode('latin1')
return str(s)
def asbytes(s):
if isinstance(s, bytes):
return s
return str(s).encode('latin1')
def asstr(s):
if isinstance(s, bytes):
return s.decode('latin1')
return str(s)
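# asbytes/asunicode/asstr round-trip through latin1 so that every byte value
# 0-255 survives the bytes <-> str hop, e.g. asbytes("abc") == b"abc" and
# asunicode(b"abc") == "abc".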
def isfileobj(f):
return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))
def open_latin1(filename, mode='r'):
return open(filename, mode=mode, encoding='iso-8859-1')
def sixu(s):
return s
strchar = 'U'
def getexception():
return sys.exc_info()[1]
def asbytes_nested(x):
if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
return [asbytes_nested(y) for y in x]
else:
return asbytes(x)
def asunicode_nested(x):
if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
return [asunicode_nested(y) for y in x]
else:
return asunicode(x)
def is_pathlib_path(obj):
"""
Check whether obj is a `pathlib.Path` object.
Prefer using ``isinstance(obj, os.PathLike)`` instead of this function.
"""
return isinstance(obj, Path)
# from Python 3.7
class contextlib_nullcontext:
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager:
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
.. note::
Prefer using `contextlib.nullcontext` instead of this context manager.
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
def npy_load_module(name, fn, info=None):
"""
Load a module. Uses ``load_module`` which will be deprecated in python
3.12. An alternative that uses ``exec_module`` is in
numpy.distutils.misc_util.exec_mod_from_location
.. versionadded:: 1.11.2
Parameters
----------
name : str
Full module name.
fn : str
Path to module file.
info : tuple, optional
Only here for backward compatibility with Python 2.*.
Returns
-------
mod : module
"""
# Explicitly lazy import this to avoid paying the cost
# of importing importlib at startup
from importlib.machinery import SourceFileLoader
return SourceFileLoader(name, fn).load_module()
os_fspath = os.fspath
os_PathLike = os.PathLike
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/tests/test_compat.py
from os.path import join
from numpy.compat import isfileobj
from numpy.testing import assert_
from numpy.testing import tempdir
def test_isfileobj():
with tempdir(prefix="numpy_test_compat_") as folder:
filename = join(folder, 'a.bin')
with open(filename, 'wb') as f:
assert_(isfileobj(f))
with open(filename, 'ab') as f:
assert_(isfileobj(f))
with open(filename, 'rb') as f:
assert_(isfileobj(f))
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/__init__.py
"""
Discrete Fourier Transform (:mod:`numpy.fft`)
=============================================
.. currentmodule:: numpy.fft
The SciPy module `scipy.fft` is a more comprehensive superset
of ``numpy.fft``, which includes only a basic set of routines.
Standard FFTs
-------------
.. autosummary::
:toctree: generated/
fft Discrete Fourier transform.
ifft Inverse discrete Fourier transform.
fft2 Discrete Fourier transform in two dimensions.
ifft2 Inverse discrete Fourier transform in two dimensions.
fftn Discrete Fourier transform in N-dimensions.
ifftn Inverse discrete Fourier transform in N dimensions.
Real FFTs
---------
.. autosummary::
:toctree: generated/
rfft Real discrete Fourier transform.
irfft Inverse real discrete Fourier transform.
rfft2 Real discrete Fourier transform in two dimensions.
irfft2 Inverse real discrete Fourier transform in two dimensions.
rfftn Real discrete Fourier transform in N dimensions.
irfftn Inverse real discrete Fourier transform in N dimensions.
Hermitian FFTs
--------------
.. autosummary::
:toctree: generated/
hfft Hermitian discrete Fourier transform.
ihfft Inverse Hermitian discrete Fourier transform.
Helper routines
---------------
.. autosummary::
:toctree: generated/
fftfreq Discrete Fourier Transform sample frequencies.
rfftfreq DFT sample frequencies (for usage with rfft, irfft).
fftshift Shift zero-frequency component to center of spectrum.
ifftshift Inverse of fftshift.
Background information
----------------------
Fourier analysis is fundamentally a method for expressing a function as a
sum of periodic components, and for recovering the function from those
components. When both the function and its Fourier transform are
replaced with discretized counterparts, it is called the discrete Fourier
transform (DFT). The DFT has become a mainstay of numerical computing in
part because of a very fast algorithm for computing it, called the Fast
Fourier Transform (FFT), which was known to Gauss (1805) and was brought
to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
provide an accessible introduction to Fourier analysis and its
applications.
Because the discrete Fourier transform separates its input into
components that contribute at discrete frequencies, it has a great number
of applications in digital signal processing, e.g., for filtering, and in
this context the discretized input to the transform is customarily
referred to as a *signal*, which exists in the *time domain*. The output
is called a *spectrum* or *transform* and exists in the *frequency
domain*.
Implementation details
----------------------
There are many ways to define the DFT, varying in the sign of the
exponent, normalization, etc. In this implementation, the DFT is defined
as
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
\\qquad k = 0,\\ldots,n-1.
The DFT is in general defined for complex inputs and outputs, and a
single-frequency component at linear frequency :math:`f` is
represented by a complex exponential
:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
is the sampling interval.
The values in the result follow so-called "standard" order: If ``A =
fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
negative-frequency terms, in order of decreasingly negative frequency.
For an even number of input points, ``A[n/2]`` represents both positive and
negative Nyquist frequency, and is also purely real for real input. For
an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
of corresponding elements in the output. The routine
``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
that shift.
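For example, for ``n = 8`` the sample frequencies returned by
``np.fft.fftfreq(8)`` are ``[0., 0.125, 0.25, 0.375, -0.5, -0.375, -0.25,
-0.125]``: the positive frequencies come first and the Nyquist term sits at
index ``n/2``.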
When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
The phase spectrum is obtained by ``np.angle(A)``.
The inverse DFT is defined as
.. math::
a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
\\qquad m = 0,\\ldots,n-1.
It differs from the forward transform by the sign of the exponential
argument and the default normalization by :math:`1/n`.
Type Promotion
--------------
`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and
``complex128`` arrays respectively. For an FFT implementation that does not
promote input arrays, see `scipy.fftpack`.
Normalization
-------------
The argument ``norm`` indicates which direction of the pair of direct/inverse
transforms is scaled and with what normalization factor.
The default normalization (``"backward"``) has the direct (forward) transforms
unscaled and the inverse (backward) transforms scaled by :math:`1/n`. It is
possible to obtain unitary transforms by setting the keyword argument ``norm``
to ``"ortho"`` so that both direct and inverse transforms are scaled by
:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to
``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse
transforms unscaled (i.e. exactly opposite to the default ``"backward"``).
`None` is an alias of the default option ``"backward"`` for backward
compatibility.
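For example, ``np.fft.ifft(np.fft.fft(a, norm=mode), norm=mode)`` recovers
``a`` (up to rounding error) for each of the three modes, since the pair
always applies a combined scaling of :math:`1/n`.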
Real and Hermitian transforms
-----------------------------
When the input is purely real, its transform is Hermitian, i.e., the
component at frequency :math:`f_k` is the complex conjugate of the
component at frequency :math:`-f_k`, which means that for real
inputs there is no information in the negative frequency components that
is not already available from the positive frequency components.
The family of `rfft` functions is
designed to operate on real inputs, and exploits this symmetry by
computing only the positive frequency components, up to and including the
Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
output points. The inverses of this family assumes the same symmetry of
its input, and for an output of ``n`` points uses ``n/2+1`` input points.
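For example, ``np.fft.rfft(np.zeros(8))`` has shape ``(5,)``, and
``np.fft.irfft`` applied to a length-5 spectrum returns 8 real points by
default.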
Correspondingly, when the spectrum is purely real, the signal is
Hermitian. The `hfft` family of functions exploits this symmetry by
using ``n/2+1`` complex points in the input (time) domain for ``n`` real
points in the frequency domain.
In higher dimensions, FFTs are used, e.g., for image analysis and
filtering. The computational efficiency of the FFT means that it can
also be a faster way to compute large convolutions, using the property
that a convolution in the time domain is equivalent to a point-by-point
multiplication in the frequency domain.
Higher dimensions
-----------------
In two dimensions, the DFT is defined as
.. math::
A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
\\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
which extends in the obvious way to higher dimensions, and the inverses
in higher dimensions also extend in the same way.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
12-13. Cambridge Univ. Press, Cambridge, UK.
Examples
--------
For examples, see the various functions.
"""
from . import _pocketfft, helper
from ._pocketfft import *
from .helper import *
__all__ = _pocketfft.__all__.copy()
__all__ += helper.__all__
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/setup.py
import sys
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('fft', parent_package, top_path)
config.add_subpackage('tests')
# AIX needs to be told to use large file support - at all times
defs = [('_LARGE_FILES', None)] if sys.platform[:3] == "aix" else []
# Configure pocketfft_internal
config.add_extension('_pocketfft_internal',
sources=['_pocketfft.c'],
define_macros=defs,
)
config.add_data_files('*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/_pocketfft.py
"""
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1, norm="backward")
ifft(a, n=None, axis=-1, norm="backward")
rfft(a, n=None, axis=-1, norm="backward")
irfft(a, n=None, axis=-1, norm="backward")
hfft(a, n=None, axis=-1, norm="backward")
ihfft(a, n=None, axis=-1, norm="backward")
fftn(a, s=None, axes=None, norm="backward")
ifftn(a, s=None, axes=None, norm="backward")
rfftn(a, s=None, axes=None, norm="backward")
irfftn(a, s=None, axes=None, norm="backward")
fft2(a, s=None, axes=(-2,-1), norm="backward")
ifft2(a, s=None, axes=(-2, -1), norm="backward")
rfft2(a, s=None, axes=(-2,-1), norm="backward")
irfft2(a, s=None, axes=(-2, -1), norm="backward")
i = inverse transform
r = transform of purely real data
h = Hermitian transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
"""
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
import functools
from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt
from . import _pocketfft_internal as pfi
from numpy.core.multiarray import normalize_axis_index
from numpy.core import overrides
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.fft')
# `inv_norm` is a float by which the result of the transform needs to be
# divided. This replaces the original, more intuitive `fct` parameter to avoid
# divisions by zero (or alternatively additional checks) in the case of
# zero-length axes during its computation.
def _raw_fft(a, n, axis, is_real, is_forward, inv_norm):
axis = normalize_axis_index(axis, a.ndim)
if n is None:
n = a.shape[axis]
fct = 1/inv_norm
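    # Crop or zero-pad the input along `axis` so that exactly `n` points
    # are handed to the pocketfft kernel below.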
if a.shape[axis] != n:
s = list(a.shape)
index = [slice(None)]*len(s)
if s[axis] > n:
index[axis] = slice(0, n)
a = a[tuple(index)]
else:
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[tuple(index)] = a
a = z
if axis == a.ndim-1:
r = pfi.execute(a, is_real, is_forward, fct)
else:
a = swapaxes(a, axis, -1)
r = pfi.execute(a, is_real, is_forward, fct)
r = swapaxes(r, axis, -1)
return r
def _get_forward_norm(n, norm):
if n < 1:
raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
if norm is None or norm == "backward":
return 1
elif norm == "ortho":
return sqrt(n)
elif norm == "forward":
return n
    raise ValueError(f'Invalid norm value {norm}; should be "backward", '
                     '"ortho" or "forward".')
def _get_backward_norm(n, norm):
if n < 1:
raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
if norm is None or norm == "backward":
return n
elif norm == "ortho":
return sqrt(n)
elif norm == "forward":
return 1
raise ValueError(f'Invalid norm value {norm}; should be "backward", '
'"ortho" or "forward".')
_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward",
"ortho": "ortho", "forward": "backward"}
def _swap_direction(norm):
try:
return _SWAP_DIRECTION_MAP[norm]
except KeyError:
raise ValueError(f'Invalid norm value {norm}; should be "backward", '
'"ortho" or "forward".') from None
def _fft_dispatcher(a, n=None, axis=None, norm=None):
return (a,)
@array_function_dispatch(_fft_dispatcher)
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,
2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,
-1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,
1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
inv_norm = _get_forward_norm(n, norm)
output = _raw_fft(a, n, axis, False, True, inv_norm)
return output
@array_function_dispatch(_fft_dispatcher)
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
    For an even number of input points, ``a[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, label='real')
[<matplotlib.lines.Line2D object at ...>]
>>> plt.plot(t, s.imag, '--', label='imaginary')
[<matplotlib.lines.Line2D object at ...>]
>>> plt.legend()
<matplotlib.legend.Legend object at ...>
>>> plt.show()
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
inv_norm = _get_backward_norm(n, norm)
output = _raw_fft(a, n, axis, False, False, inv_norm)
return output
@array_function_dispatch(_fft_dispatcher)
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
    When the DFT is computed for purely real input, the output is
    Hermitian-symmetric, i.e., the negative-frequency terms are just the
    complex conjugates of the corresponding positive-frequency terms and are
    therefore redundant. This function does not compute the negative-frequency
    terms, and the length of the transformed axis of the output is therefore
    ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
inv_norm = _get_forward_norm(n, norm)
output = _raw_fft(a, n, axis, True, True, inv_norm)
return output
@array_function_dispatch(_fft_dispatcher)
def irfft(a, n=None, axis=-1, norm=None):
"""
Computes the inverse of `rfft`.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is taken to be
``2*(m-1)`` where ``m`` is the length of the input along the axis
specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
    The correct interpretation of the Hermitian input depends on the length of
    the original data, as given by `n`. This is because each input shape could
    correspond to either an odd or even length signal. By default, `irfft`
    assumes an even output length, which puts the last entry at the Nyquist
    frequency, aliasing it with its symmetric counterpart. By Hermitian
    symmetry, the value is thus treated as purely real. To avoid losing
    information, the correct length of the real input **must** be given.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary
>>> np.fft.irfft([1, -1j, -1])
array([0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a)
if n is None:
n = (a.shape[axis] - 1) * 2
inv_norm = _get_backward_norm(n, norm)
output = _raw_fft(a, n, axis, True, False, inv_norm)
return output
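# --- Illustrative sketch (not part of the original source) -----------------
# Fourier interpolation as described in the Notes of `irfft` above:
# zero-padding the spectrum resamples a band-limited signal.  With the
# default "backward" norm the amplitude picks up a factor n/m when going
# from n to m samples, which this sketch compensates for explicitly.
# `_demo_resample` is a hypothetical name used only for illustration.
def _demo_resample():
    import numpy as np
    t16 = np.linspace(0, 1, 16, endpoint=False)
    t32 = np.linspace(0, 1, 32, endpoint=False)
    a = np.sin(2 * np.pi * 3 * t16)           # 3 cycles, well below Nyquist
    a_resamp = np.fft.irfft(np.fft.rfft(a), 32) * (32 / 16)
    assert np.allclose(a_resamp, np.sin(2 * np.pi * 3 * t32), atol=1e-12)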
@array_function_dispatch(_fft_dispatcher)
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
spectrum.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output. For `n` output
points, ``n//2 + 1`` input points are necessary. If the input is
longer than this, it is cropped. If it is shorter than this, it is
padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``
where ``m`` is the length of the input along the axis specified by
`axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*m - 2`` where ``m`` is the length of the transformed axis of
the input. To get an odd number of output points, `n` must be
        specified, for instance as ``2*m - 1`` in the typical case.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
    See Also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
    which you must supply the length of the result if it is to be odd:
* even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
* odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
    The correct interpretation of the Hermitian input depends on the length of
    the original data, as given by `n`. This is because each input shape could
    correspond to either an odd or even length signal. By default, `hfft`
    assumes an even output length, which puts the last entry at the Nyquist
    frequency, aliasing it with its symmetric counterpart. By Hermitian
    symmetry, the value is thus treated as purely real. To avoid losing
    information, the shape of the full signal **must** be given.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, -0.+0.j], # may vary
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a)
if n is None:
n = (a.shape[axis] - 1) * 2
new_norm = _swap_direction(norm)
output = irfft(conjugate(a), n, axis, norm=new_norm)
return output
@array_function_dispatch(_fft_dispatcher)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal that has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT, the number of points along
transformation axis in the input to use. If `n` is smaller than
the length of the input, the input is cropped. If it is larger,
the input is padded with zeros. If `n` is not given, the length of
the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n//2 + 1``.
    See Also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
which you must supply the length of the result if it is to be odd:
* even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
* odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
new_norm = _swap_direction(norm)
output = conjugate(rfft(a, n, axis, norm=new_norm))
return output
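# --- Illustrative sketch (not part of the original source) -----------------
# The even/odd round trips stated in the Notes of `hfft`/`ihfft` above.
# For an exact round trip the zero-frequency term must be real, and in the
# even case the last term (the Nyquist bin) must be real as well, since its
# imaginary part is discarded.  `_demo_hfft_roundtrip` is hypothetical.
def _demo_hfft_roundtrip():
    import numpy as np
    rng = np.random.default_rng(1)
    a = rng.standard_normal(8) + 1j * rng.standard_normal(8)
    a[0] = a[0].real                    # zero-frequency term must be real
    # Odd output length: every non-DC input bin may stay complex.
    assert np.allclose(np.fft.ihfft(np.fft.hfft(a, 2 * len(a) - 1)), a)
    # Even output length: the last bin sits at the Nyquist frequency.
    a[-1] = a[-1].real
    assert np.allclose(np.fft.ihfft(np.fft.hfft(a, 2 * len(a) - 2)), a)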
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
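# --- Illustrative sketch (not part of the original source) -----------------
# How `_cook_nd_args` resolves the `s`/`axes` defaults, including the
# `invreal` correction that maps a half-spectrum of length m back to a real
# signal of length 2*(m-1).  `_demo_cook_nd_args` is a hypothetical name
# used only for this sketch.
def _demo_cook_nd_args():
    import numpy as np
    a = np.empty((4, 6, 5))
    s, axes = _cook_nd_args(a)
    assert list(s) == [4, 6, 5] and list(axes) == [-3, -2, -1]
    s, axes = _cook_nd_args(a, axes=[0, 2])
    assert list(s) == [4, 5] and list(axes) == [0, 2]
    # Inverse real transform: the last axis is inferred as 2*(m-1), so a
    # half-spectrum axis of length 5 maps back to a real axis of length 8.
    s, _ = _cook_nd_args(a, invreal=1)
    assert list(s) == [4, 6, 8]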
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
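# --- Illustrative sketch (not part of the original source) -----------------
# `_raw_fftnd` reduces an n-dimensional transform to one-dimensional
# transforms applied axis by axis; the equivalence below is what the loop
# above relies on.  `_demo_fftn_is_repeated_fft` is a hypothetical name.
def _demo_fftn_is_repeated_fft():
    import numpy as np
    rng = np.random.default_rng(2)
    x = rng.standard_normal((4, 5)) + 1j * rng.standard_normal((4, 5))
    by_axes = np.fft.fft(np.fft.fft(x, axis=1), axis=0)
    assert np.allclose(np.fft.fftn(x), by_axes)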
def _fftn_dispatcher(a, s=None, axes=None, norm=None):
return (a,)
@array_function_dispatch(_fftn_dispatcher)
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the transform over that axis is
performed multiple times.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
@array_function_dispatch(_fftn_dispatcher)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the inverse transform over that
axis is performed multiple times.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
[0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
@array_function_dispatch(_fftn_dispatcher)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform.
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
        Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary
0. +0.j , 0. +0.j ],
[-12.5+17.20477401j, 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5 +4.0614962j , 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5 -4.0614962j , 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5-17.20477401j, 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
@array_function_dispatch(_fftn_dispatcher)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
[0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
@array_function_dispatch(_fftn_dispatcher)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[8.+0.j, 0.+0.j], # may vary
[0.+0.j, 0.+0.j]],
[[0.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[4.+0.j, 0.+0.j], # may vary
[4.+0.j, 0.+0.j]],
[[0.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j]]])
"""
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
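# --- Illustrative sketch (not part of the original source) -----------------
# The composition performed by `rfftn` above: a real transform over the last
# axis followed by complex transforms over the remaining axes.
# `_demo_rfftn_composition` is a hypothetical name used only here.
def _demo_rfftn_composition():
    import numpy as np
    x = np.random.default_rng(4).standard_normal((3, 4, 6))
    step = np.fft.rfft(x, axis=-1)      # real transform, last axis
    step = np.fft.fft(step, axis=0)     # complex transforms, other axes
    step = np.fft.fft(step, axis=1)
    assert np.allclose(np.fft.rfftn(x), step)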
@array_function_dispatch(_fftn_dispatcher)
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.rfft2(a)
array([[ 50. +0.j , 0. +0.j , 0. +0.j ],
[-12.5+17.20477401j, 0. +0.j , 0. +0.j ],
[-12.5 +4.0614962j , 0. +0.j , 0. +0.j ],
[-12.5 -4.0614962j , 0. +0.j , 0. +0.j ],
[-12.5-17.20477401j, 0. +0.j , 0. +0.j ]])
"""
return rfftn(a, s, axes, norm)
@array_function_dispatch(_fftn_dispatcher)
def irfftn(a, s=None, axes=None, norm=None):
"""
Computes the inverse of `rfftn`.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
        with zeros. If `s` is not given, the shape of the input along the axes
        specified by `axes` is used, except for the last axis, which is taken
        to be ``2*(m-1)`` where ``m`` is the length of the input along that
        axis.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the inverse transform over that
        axis is performed multiple times.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
    The correct interpretation of the Hermitian input depends on the shape of
    the original data, as given by `s`. This is because each input shape could
    correspond to either an odd or even length signal. By default, `irfftn`
    assumes an even output length, which puts the last entry at the Nyquist
    frequency, aliasing it with its symmetric counterpart. When performing the
    final complex-to-real transform, the last value is thus treated as purely
    real. To avoid losing information, the correct shape of the real input
    **must** be given.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[1., 1.],
[1., 1.]],
[[1., 1.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
"""
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
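# --- Illustrative sketch (not part of the original source) -----------------
# The even/odd ambiguity described in the Notes of `irfftn` above: an
# odd-length last axis is only recovered when the true shape is passed via
# `s`.  `_demo_irfftn_shape` is a hypothetical name used only here.
def _demo_irfftn_shape():
    import numpy as np
    x = np.random.default_rng(3).standard_normal((4, 5))   # odd last axis
    spec = np.fft.rfftn(x)                                 # shape (4, 3)
    assert np.fft.irfftn(spec).shape == (4, 4)             # even-length guess
    assert np.allclose(np.fft.irfftn(spec, s=x.shape), x)  # exact with `s`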
@array_function_dispatch(_fftn_dispatcher)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Computes the inverse of `rfft2`.
Parameters
----------
a : array_like
        The input array.
s : sequence of ints, optional
Shape of the real output to the inverse FFT.
axes : sequence of ints, optional
        The axes over which to compute the inverse FFT.
Default is the last two axes.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
rfft2 : The forward two-dimensional FFT of real input,
of which `irfft2` is the inverse.
rfft : The one-dimensional FFT for real input.
irfft : The inverse of the one-dimensional FFT of real input.
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> A = np.fft.rfft2(a)
>>> np.fft.irfft2(A, s=a.shape)
array([[0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2.],
[3., 3., 3., 3., 3.],
[4., 4., 4., 4., 4.]])
"""
return irfftn(a, s, axes, norm)
| 52,897 | Python | 36.121403 | 90 | 0.61132 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/helper.py | """
Discrete Fourier Transforms - helper.py
"""
from numpy.core import integer, empty, arange, asarray, roll
from numpy.core.overrides import array_function_dispatch, set_module
# Created by Pearu Peterson, September 2002
__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
integer_types = (int, integer)
def _fftshift_dispatcher(x, axes=None):
return (x,)
@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def fftshift(x, axes=None):
"""
Shift the zero-frequency component to the center of the spectrum.
This function swaps half-spaces for all axes listed (defaults to all).
Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
Parameters
----------
x : array_like
Input array.
axes : int or shape tuple, optional
Axes over which to shift. Default is None, which shifts all axes.
Returns
-------
y : ndarray
The shifted array.
See Also
--------
ifftshift : The inverse of `fftshift`.
Examples
--------
>>> freqs = np.fft.fftfreq(10, 0.1)
>>> freqs
array([ 0., 1., 2., ..., -3., -2., -1.])
>>> np.fft.fftshift(freqs)
array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
Shift the zero-frequency component only along the second axis:
>>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
>>> freqs
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
>>> np.fft.fftshift(freqs, axes=(1,))
array([[ 2., 0., 1.],
[-4., 3., 4.],
[-1., -3., -2.]])
"""
x = asarray(x)
if axes is None:
axes = tuple(range(x.ndim))
shift = [dim // 2 for dim in x.shape]
elif isinstance(axes, integer_types):
shift = x.shape[axes] // 2
else:
shift = [x.shape[ax] // 2 for ax in axes]
return roll(x, shift, axes)
@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def ifftshift(x, axes=None):
"""
The inverse of `fftshift`. Although identical for even-length `x`, the
functions differ by one sample for odd-length `x`.
Parameters
----------
x : array_like
Input array.
axes : int or shape tuple, optional
Axes over which to calculate. Defaults to None, which shifts all axes.
Returns
-------
y : ndarray
The shifted array.
See Also
--------
fftshift : Shift zero-frequency component to the center of the spectrum.
Examples
--------
>>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
>>> freqs
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
>>> np.fft.ifftshift(np.fft.fftshift(freqs))
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
"""
x = asarray(x)
if axes is None:
axes = tuple(range(x.ndim))
shift = [-(dim // 2) for dim in x.shape]
elif isinstance(axes, integer_types):
shift = -(x.shape[axes] // 2)
else:
shift = [-(x.shape[ax] // 2) for ax in axes]
return roll(x, shift, axes)
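# --- Illustrative sketch (not part of the original source) -----------------
# For odd lengths `fftshift` and `ifftshift` differ by one sample, so only
# the `ifftshift(fftshift(x))` order is an exact inverse, as noted above.
# `_demo_shift_inverse` is a hypothetical name used only for this sketch.
def _demo_shift_inverse():
    import numpy as np
    x = np.arange(5)
    assert np.array_equal(np.fft.ifftshift(np.fft.fftshift(x)), x)
    # Applying fftshift twice is *not* the identity for odd n:
    assert not np.array_equal(np.fft.fftshift(np.fft.fftshift(x)), x)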
@set_module('numpy.fft')
def fftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies.
The returned float array `f` contains the frequency bin centers in cycles
per unit of the sample spacing (with zero at the start). For instance, if
the sample spacing is in seconds, then the frequency unit is cycles/second.
Given a window length `n` and a sample spacing `d`::
f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd
Parameters
----------
n : int
Window length.
d : scalar, optional
Sample spacing (inverse of the sampling rate). Defaults to 1.
Returns
-------
f : ndarray
Array of length `n` containing the sample frequencies.
Examples
--------
>>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
>>> fourier = np.fft.fft(signal)
>>> n = signal.size
>>> timestep = 0.1
>>> freq = np.fft.fftfreq(n, d=timestep)
>>> freq
array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25])
"""
if not isinstance(n, integer_types):
raise ValueError("n should be an integer")
val = 1.0 / (n * d)
results = empty(n, int)
N = (n-1)//2 + 1
p1 = arange(0, N, dtype=int)
results[:N] = p1
p2 = arange(-(n//2), 0, dtype=int)
results[N:] = p2
return results * val
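# --- Illustrative sketch (not part of the original source) -----------------
# A direct check of the frequency layout documented for `fftfreq` above,
# for one even and one odd window length (d=1).  `_demo_fftfreq_layout` is
# a hypothetical name used only for this sketch.
def _demo_fftfreq_layout():
    import numpy as np
    assert np.allclose(np.fft.fftfreq(4), [0, 0.25, -0.5, -0.25])
    assert np.allclose(np.fft.fftfreq(5), [0, 0.2, 0.4, -0.4, -0.2])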
@set_module('numpy.fft')
def rfftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies
(for usage with rfft, irfft).
The returned float array `f` contains the frequency bin centers in cycles
per unit of the sample spacing (with zero at the start). For instance, if
the sample spacing is in seconds, then the frequency unit is cycles/second.
Given a window length `n` and a sample spacing `d`::
f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even
f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd
Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
the Nyquist frequency component is considered to be positive.
Parameters
----------
n : int
Window length.
d : scalar, optional
Sample spacing (inverse of the sampling rate). Defaults to 1.
Returns
-------
f : ndarray
Array of length ``n//2 + 1`` containing the sample frequencies.
Examples
--------
>>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
>>> fourier = np.fft.rfft(signal)
>>> n = signal.size
>>> sample_rate = 100
>>> freq = np.fft.fftfreq(n, d=1./sample_rate)
>>> freq
array([ 0., 10., 20., ..., -30., -20., -10.])
>>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
>>> freq
array([ 0., 10., 20., 30., 40., 50.])
"""
if not isinstance(n, integer_types):
raise ValueError("n should be an integer")
val = 1.0/(n*d)
N = n//2 + 1
results = arange(0, N, dtype=int)
return results * val
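# --- Illustrative sketch (not part of the original source) -----------------
# Unlike `fftfreq`, `rfftfreq` returns only non-negative frequencies and
# treats the Nyquist bin as positive.  `_demo_rfftfreq_layout` is a
# hypothetical name used only for this sketch.
def _demo_rfftfreq_layout():
    import numpy as np
    assert np.allclose(np.fft.rfftfreq(4), [0, 0.25, 0.5])  # Nyquist at +0.5
    assert np.allclose(np.fft.rfftfreq(5), [0, 0.2, 0.4])   # no Nyquist bin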
| 6,154 | Python | 26.725225 | 79 | 0.544849 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/tests/test_pocketfft.py | import numpy as np
import pytest
from numpy.random import random
from numpy.testing import (
assert_array_equal, assert_raises, assert_allclose
)
import threading
import queue
def fft1(x):
L = len(x)
phase = -2j * np.pi * (np.arange(L) / L)
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
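# Illustrative note (not part of the original test suite): `fft1` builds the
# full L x L DFT matrix, so it costs O(L**2) and is only suitable as a
# reference for the small inputs used below.  An equivalent matrix form,
# assuming numpy is imported as above:
#
#     M = np.exp(-2j * np.pi * np.outer(np.arange(L), np.arange(L)) / L)
#     X = M @ x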
class TestFFTShift:
def test_fft_n(self):
assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
class TestFFT1D:
def test_identity(self):
maxlen = 512
x = random(maxlen) + 1j*random(maxlen)
xr = random(maxlen)
for i in range(1, maxlen):
assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
atol=1e-12)
assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i),
xr[0:i], atol=1e-12)
def test_fft(self):
x = random(30) + 1j*random(30)
assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6)
assert_allclose(fft1(x) / np.sqrt(30),
np.fft.fft(x, norm="ortho"), atol=1e-6)
assert_allclose(fft1(x) / 30.,
np.fft.fft(x, norm="forward"), atol=1e-6)
@pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward'))
def test_ifft(self, norm):
x = random(30) + 1j*random(30)
assert_allclose(
x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
atol=1e-6)
# Ensure we get the correct error message
with pytest.raises(ValueError,
match='Invalid number of FFT data points'):
np.fft.ifft([], norm=norm)
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
np.fft.fft2(x), atol=1e-6)
assert_allclose(np.fft.fft2(x),
np.fft.fft2(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
np.fft.fft2(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.fft2(x) / (30. * 20.),
np.fft.fft2(x, norm="forward"), atol=1e-6)
def test_ifft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
np.fft.ifft2(x), atol=1e-6)
assert_allclose(np.fft.ifft2(x),
np.fft.ifft2(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
np.fft.ifft2(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.ifft2(x) * (30. * 20.),
np.fft.ifft2(x, norm="forward"), atol=1e-6)
def test_fftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_allclose(
np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
np.fft.fftn(x), atol=1e-6)
assert_allclose(np.fft.fftn(x),
np.fft.fftn(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
np.fft.fftn(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.),
np.fft.fftn(x, norm="forward"), atol=1e-6)
def test_ifftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_allclose(
np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
np.fft.ifftn(x), atol=1e-6)
assert_allclose(np.fft.ifftn(x),
np.fft.ifftn(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
np.fft.ifftn(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.),
np.fft.ifftn(x, norm="forward"), atol=1e-6)
def test_rfft(self):
x = random(30)
for n in [x.size, 2*x.size]:
for norm in [None, 'backward', 'ortho', 'forward']:
assert_allclose(
np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n),
np.fft.rfft(x, n=n, norm="backward"), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n) / np.sqrt(n),
np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n) / n,
np.fft.rfft(x, n=n, norm="forward"), atol=1e-6)
def test_irfft(self):
x = random(30)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"),
norm="backward"), atol=1e-6)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"),
norm="ortho"), atol=1e-6)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"),
norm="forward"), atol=1e-6)
def test_rfft2(self):
x = random((30, 20))
assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
assert_allclose(np.fft.rfft2(x),
np.fft.rfft2(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
np.fft.rfft2(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.rfft2(x) / (30. * 20.),
np.fft.rfft2(x, norm="forward"), atol=1e-6)
def test_irfft2(self):
x = random((30, 20))
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"),
norm="backward"), atol=1e-6)
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"),
norm="ortho"), atol=1e-6)
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"),
norm="forward"), atol=1e-6)
def test_rfftn(self):
x = random((30, 20, 10))
assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
assert_allclose(np.fft.rfftn(x),
np.fft.rfftn(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
np.fft.rfftn(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.),
np.fft.rfftn(x, norm="forward"), atol=1e-6)
def test_irfftn(self):
x = random((30, 20, 10))
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"),
norm="backward"), atol=1e-6)
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"),
norm="ortho"), atol=1e-6)
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"),
norm="forward"), atol=1e-6)
def test_hfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm),
np.fft.hfft(x_herm, norm="backward"), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm) / 30.,
np.fft.hfft(x_herm, norm="forward"), atol=1e-6)
def test_ihfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
norm="backward"), norm="backward"), atol=1e-6)
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
norm="ortho"), norm="ortho"), atol=1e-6)
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
norm="forward"), norm="forward"), atol=1e-6)
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.rfftn, np.fft.irfftn])
def test_axes(self, op):
x = random((30, 20, 10))
axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
for a in axes:
op_tr = op(np.transpose(x, a))
tr_op = np.transpose(op(x, axes=a), a)
assert_allclose(op_tr, tr_op, atol=1e-6)
def test_all_1d_norm_preserving(self):
# verify that round-trip transforms are norm-preserving
x = random(30)
x_norm = np.linalg.norm(x)
n = x.size * 2
func_pairs = [(np.fft.fft, np.fft.ifft),
(np.fft.rfft, np.fft.irfft),
# hfft: order so the first function takes x.size samples
# (necessary for comparison to x_norm above)
(np.fft.ihfft, np.fft.hfft),
]
for forw, back in func_pairs:
for n in [x.size, 2*x.size]:
for norm in [None, 'backward', 'ortho', 'forward']:
tmp = forw(x, n=n, norm=norm)
tmp = back(tmp, n=n, norm=norm)
assert_allclose(x_norm,
np.linalg.norm(tmp), atol=1e-6)
@pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
np.longdouble])
def test_dtypes(self, dtype):
# make sure that all input precisions are accepted and internally
# converted to 64bit
x = random(30).astype(dtype)
assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
@pytest.mark.parametrize(
"dtype",
[np.float32, np.float64, np.complex64, np.complex128])
@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
@pytest.mark.parametrize(
"fft",
[np.fft.fft, np.fft.fft2, np.fft.fftn,
np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
def test_fft_with_order(dtype, order, fft):
# Check that FFT/IFFT produces identical results for C, Fortran and
    # non-contiguous arrays.
rng = np.random.RandomState(42)
X = rng.rand(8, 7, 13).astype(dtype, copy=False)
# See discussion in pull/14178
_tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
if order == 'F':
Y = np.asfortranarray(X)
else:
        # Make a non-contiguous array
Y = X[::-1]
X = np.ascontiguousarray(X[::-1])
if fft.__name__.endswith('fft'):
for axis in range(3):
X_res = fft(X, axis=axis)
Y_res = fft(Y, axis=axis)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
elif fft.__name__.endswith(('fft2', 'fftn')):
axes = [(0, 1), (1, 2), (0, 2)]
if fft.__name__.endswith('fftn'):
axes.extend([(0,), (1,), (2,), None])
for ax in axes:
X_res = fft(X, axes=ax)
Y_res = fft(Y, axes=ax)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
else:
        raise ValueError("unexpected fft function: %s" % fft.__name__)
class TestFFTThreadSafe:
threads = 16
input_shape = (800, 200)
def _test_mtsame(self, func, *args):
def worker(args, q):
q.put(func(*args))
q = queue.Queue()
expected = func(*args)
# Spin off a bunch of threads to call the same function simultaneously
t = [threading.Thread(target=worker, args=(args, q))
for i in range(self.threads)]
[x.start() for x in t]
[x.join() for x in t]
# Make sure all threads returned the correct value
for i in range(self.threads):
assert_array_equal(q.get(timeout=5), expected,
'Function returned wrong value in multithreaded context')
def test_fft(self):
        a = np.ones(self.input_shape) * (1+0j)
self._test_mtsame(np.fft.fft, a)
def test_ifft(self):
        a = np.ones(self.input_shape) * (1+0j)
self._test_mtsame(np.fft.ifft, a)
def test_rfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(np.fft.rfft, a)
def test_irfft(self):
        a = np.ones(self.input_shape) * (1+0j)
self._test_mtsame(np.fft.irfft, a)
| 12,827 | Python | 40.649351 | 81 | 0.518126 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/tests/test_helper.py
"""Test functions for fftpack.helper module
Copied from fftpack.helper by Pearu Peterson, October 2005
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy import fft, pi
class TestFFTShift:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
def test_inverse(self):
for n in [1, 4, 9, 100, 211]:
x = np.random.random((n,))
assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
def test_axes_keyword(self):
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
assert_array_almost_equal(fft.fftshift(freqs, axes=0),
fft.fftshift(freqs, axes=(0,)))
assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
fft.ifftshift(shifted, axes=(0,)))
assert_array_almost_equal(fft.fftshift(freqs), shifted)
assert_array_almost_equal(fft.ifftshift(shifted), freqs)
def test_uneven_dims(self):
""" Test 2D input, which has uneven dimension sizes """
freqs = [
[0, 1],
[2, 3],
[4, 5]
]
# shift in dimension 0
shift_dim0 = [
[4, 5],
[0, 1],
[2, 3]
]
assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)
assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)
assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)
assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)
# shift in dimension 1
shift_dim1 = [
[1, 0],
[3, 2],
[5, 4]
]
assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)
assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)
# shift in both dimensions
shift_dim_both = [
[5, 4],
[1, 0],
[3, 2]
]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
# axes=None (default) shift in all dimensions
assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)
assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)
def test_equal_to_original(self):
""" Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
from numpy.core import asarray, concatenate, arange, take
def original_fftshift(x, axes=None):
""" How fftshift was implemented in v1.14"""
tmp = asarray(x)
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
n = tmp.shape[k]
p2 = (n + 1) // 2
mylist = concatenate((arange(p2, n), arange(p2)))
y = take(y, mylist, k)
return y
def original_ifftshift(x, axes=None):
""" How ifftshift was implemented in v1.14 """
tmp = asarray(x)
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
n = tmp.shape[k]
p2 = n - (n + 1) // 2
mylist = concatenate((arange(p2, n), arange(p2)))
y = take(y, mylist, k)
return y
# create possible 2d array combinations and try all possible keywords
# compare output to original functions
for i in range(16):
for j in range(16):
for axes_keyword in [0, 1, None, (0,), (0, 1)]:
inp = np.random.rand(i, j)
assert_array_almost_equal(fft.fftshift(inp, axes_keyword),
original_fftshift(inp, axes_keyword))
assert_array_almost_equal(fft.ifftshift(inp, axes_keyword),
original_ifftshift(inp, axes_keyword))
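# Illustrative aside (not part of the tests): for a 1-D array the shift is
# equivalent to a plain roll by half the length, which is often the easiest
# way to reason about it, e.g.:
#   x = np.arange(9)
#   np.array_equal(fft.fftshift(x), np.roll(x, 9 // 2))      # -> True
#   np.array_equal(fft.ifftshift(x), np.roll(x, -(9 // 2)))  # -> True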
class TestFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
assert_array_almost_equal(9*fft.fftfreq(9), x)
assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
assert_array_almost_equal(10*fft.fftfreq(10), x)
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
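# Illustrative aside: fftfreq(n, d) returns the bin frequencies
# [0, 1, ..., n//2 - 1, -(n//2), ..., -1] / (d * n) (for even n), e.g.:
#   fft.fftfreq(4, d=0.5)   # -> array([ 0. ,  0.5, -1. , -0.5])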
class TestRFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4]
assert_array_almost_equal(9*fft.rfftfreq(9), x)
assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, 5]
assert_array_almost_equal(10*fft.rfftfreq(10), x)
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
class TestIRFFTN:
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
a = ar + 1j*ai
axes = (-2,)
# Should not raise error
fft.irfftn(a, axes=axes)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/linalg.py
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import functools
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan, sign, argsort, sort,
reciprocal
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import _umath_linalg
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.linalg')
fortran_int = intc
@set_module('numpy.linalg')
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
def _determine_error_states():
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
return [bufsize, invalid_call_errmask, None]
# Dealing with errors in _umath_linalg
_linalg_error_extobj = _determine_error_states()
del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def _raise_linalgerror_lstsq(err, flag):
raise LinAlgError("SVD did not converge in Linear Least Squares")
def _raise_linalgerror_qr(err, flag):
raise LinAlgError("Incorrect argument found while performing "
"QR factorization")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
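# Illustrative aside: the extobj built above is threaded through the
# _umath_linalg gufunc calls so that LAPACK failure flags surface as Python
# exceptions. A sketch of the pattern used throughout this module:
#   extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
#   r = _umath_linalg.solve(a, b, signature='dd->d', extobj=extobj)
# A singular input then raises LinAlgError rather than silently producing
# invalid values.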
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
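# Illustrative examples of the promotion above (comments only):
#   _commonType(single_arr)              -> (double,  single)
#   _commonType(single_arr, csingle_arr) -> (cdouble, csingle)
#   _commonType(int_arr)                 -> (double,  double)
# i.e. the computation always happens in double/cdouble, while result_t
# records the precision the result should be cast back to.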
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is not type:
a = a.astype(type)
cast_arrays = cast_arrays + (_fastCT(a),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assert_2d(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assert_stacked_2d(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assert_stacked_square(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assert_finite(*arrays):
for a in arrays:
if not isfinite(a).all():
raise LinAlgError("Array must not contain infs or NaNs")
def _is_empty_2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def transpose(a):
"""
Transpose each matrix in a stack of matrices.
Unlike np.transpose, this only swaps the last two axes, rather than all of
them
Parameters
----------
a : (...,M,N) array_like
Returns
-------
aT : (...,N,M) ndarray
"""
return swapaxes(a, -1, -2)
# Linear equations
def _tensorsolve_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensorsolve_dispatcher)
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
if a.size != prod ** 2:
raise LinAlgError(
"Input arrays must satisfy the requirement \
prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])"
)
a = a.reshape(prod, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def _solve_dispatcher(a, b):
return (a, b)
@array_function_dispatch(_solve_dispatcher)
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
See Also
--------
scipy.linalg.solve : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine ``_gesv``.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``:
>>> a = np.array([[1, 2], [3, 5]])
>>> b = np.array([1, 2])
>>> x = np.linalg.solve(a, b)
>>> x
array([-1., 1.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def _tensorinv_dispatcher(a, ind=None):
return (a,)
@array_function_dispatch(_tensorinv_dispatcher)
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def _unary_dispatcher(a):
return (a,)
@array_function_dispatch(_unary_dispatcher)
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
See Also
--------
scipy.linalg.inv : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5 , -0.5 ]],
[[-1.25, 0.75],
[ 0.75, -0.25]]])
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
def _matrix_power_dispatcher(a, n):
return (a,)
@array_function_dispatch(_matrix_power_dispatcher)
def matrix_power(a, n):
"""
Raise a square matrix to the (integer) power `n`.
    For positive integers `n`, the power is computed by repeated matrix
    squarings and matrix multiplications. If ``n == 0``, the identity matrix
    of the same shape as `a` is returned. If ``n < 0``, the inverse
    is computed and then raised to the power ``abs(n)``.
.. note:: Stacks of object matrices are not currently supported.
Parameters
----------
a : (..., M, M) array_like
Matrix to be "powered".
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
a**n : (..., M, M) ndarray or matrix object
        The return value is the same shape and type as `a`;
        if the exponent is positive or zero then the type of the
        elements is the same as those of `a`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
For matrices that are not square or that (for negative powers) cannot
be inverted numerically.
Examples
--------
>>> from numpy.linalg import matrix_power
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
a = asanyarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
try:
n = operator.index(n)
except TypeError as e:
raise TypeError("exponent must be an integer") from e
# Fall back on dot for object arrays. Object arrays are not supported by
# the current implementation of matmul using einsum
if a.dtype != object:
fmatmul = matmul
elif a.ndim == 2:
fmatmul = dot
else:
raise NotImplementedError(
"matrix_power not supported for stacks of object arrays")
if n == 0:
a = empty_like(a)
a[...] = eye(a.shape[-2], dtype=a.dtype)
return a
elif n < 0:
a = inv(a)
n = abs(n)
# short-cuts.
if n == 1:
return a
elif n == 2:
return fmatmul(a, a)
elif n == 3:
return fmatmul(fmatmul(a, a), a)
# Use binary decomposition to reduce the number of matrix multiplications.
# Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
# increasing powers of 2, and multiply into the result as needed.
z = result = None
while n > 0:
z = a if z is None else fmatmul(z, z)
n, bit = divmod(n, 2)
if bit:
result = z if result is None else fmatmul(result, z)
return result
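# Worked illustration of the binary decomposition above (comments only):
# for n = 13 (binary 1101) the loop performs
#   z = a    -> bit 1: result = a
#   z = a^2  -> bit 0: result unchanged
#   z = a^4  -> bit 1: result = a^5
#   z = a^8  -> bit 1: result = a^13
# i.e. 5 matrix products instead of the 12 a naive product chain would use.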
# Cholesky decomposition
@array_function_dispatch(_unary_dispatcher)
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. No
checking is performed to verify whether `a` is Hermitian or not.
In addition, only the lower-triangular and diagonal elements of `a`
are used. Only `L` is actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a matrix object if
`a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
See Also
--------
scipy.linalg.cholesky : Similar function in SciPy.
scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
positive-definite matrix.
scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
`scipy.linalg.cho_solve`.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[1.+0.j, 0.-2.j],
[0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
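# Illustrative aside for the Notes above (assumed toy data; note that
# np.linalg.solve does not exploit the triangular structure, this only
# sketches the algebra of the two-step solve):
#   A = np.array([[4., 2.], [2., 3.]])
#   b = np.array([1., 2.])
#   L = np.linalg.cholesky(A)
#   y = np.linalg.solve(L, b)            # solve L y = b
#   x = np.linalg.solve(L.T.conj(), y)   # solve L.H x = y
#   np.allclose(A @ x, b)                # -> True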
# QR decomposition
def _qr_dispatcher(a, mode=None):
return (a,)
@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (..., M, N)
An array-like object with the dimensionality of at least 2.
mode : {'reduced', 'complete', 'r', 'raw'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions
(..., M, K), (..., K, N) (default)
* 'complete' : returns q, r with dimensions (..., M, M), (..., M, N)
* 'r' : returns r only with dimensions (..., K, N)
* 'raw' : returns h, tau with dimensions (..., N, M), (..., K,)
        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced', and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case. In case the number of dimensions in the input array is
greater than 2 then a stack of the matrices with above properties
is returned.
r : ndarray of float or complex, optional
The upper-triangular matrix or a stack of upper-triangular
matrices if the number of dimensions in the input array is greater
than 2.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
See Also
--------
scipy.linalg.qr : Similar function in SciPy.
scipy.linalg.rq : Compute RQ decomposition of a matrix.
Notes
-----
This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
``dorgqr``, and ``zungqr``.
For more information on the qr factorization, see for example:
https://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input
>>> q, r = np.linalg.qr(a)
>>> q.shape
(3, 2, 2)
>>> r.shape
(3, 2, 2)
>>> np.allclose(a, np.matmul(q, r))
True
Example illustrating a common use of `qr`: solving of least squares
problems
    What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
    the following data: {(0,1), (1,2), (1,2), (2,3)}. (Graph the points
    and you'll see that it should be y0 = 1, m = 1.) The answer is provided
    by solving the over-determined matrix equation ``Ax = b``, where::
        A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
        x = array([[m], [y0]])
        b = array([[1], [2], [2], [3]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 2, 2, 3])
>>> q, r = np.linalg.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(np.linalg.inv(r), p)
array([ 1., 1.])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'economic'
else:
raise ValueError(f"Unrecognized mode '{mode}'")
a, wrap = _makearray(a)
_assert_stacked_2d(a)
m, n = a.shape[-2:]
t, result_t = _commonType(a)
a = a.astype(t, copy=True)
a = _to_native_byte_order(a)
mn = min(m, n)
if m <= n:
gufunc = _umath_linalg.qr_r_raw_m
else:
gufunc = _umath_linalg.qr_r_raw_n
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_qr)
tau = gufunc(a, signature=signature, extobj=extobj)
# handle modes that don't return q
if mode == 'r':
r = triu(a[..., :mn, :])
r = r.astype(result_t, copy=False)
return wrap(r)
if mode == 'raw':
q = transpose(a)
q = q.astype(result_t, copy=False)
tau = tau.astype(result_t, copy=False)
return wrap(q), tau
if mode == 'economic':
a = a.astype(result_t, copy=False)
return wrap(a)
# mc is the number of columns in the resulting q
# matrix. If the mode is complete then it is
# same as number of rows, and if the mode is reduced,
# then it is the minimum of number of rows and columns.
if mode == 'complete' and m > n:
mc = m
gufunc = _umath_linalg.qr_complete
else:
mc = mn
gufunc = _umath_linalg.qr_reduced
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_qr)
q = gufunc(a, tau, signature=signature, extobj=extobj)
r = triu(a[..., :mc, :])
q = q.astype(result_t, copy=False)
r = r.astype(result_t, copy=False)
return wrap(q), wrap(r)
# Eigenvalues
@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigh : eigenvalues and eigenvectors of real symmetric or complex
Hermitian (conjugate symmetric) arrays.
scipy.linalg.eigvals : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.]) # random
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
_assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def _eigvalsh_dispatcher(a, UPLO=None):
return (a,)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
scipy.linalg.eigvalsh : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712]) # may vary
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
@array_function_dispatch(_unary_dispatcher)
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a real symmetric or complex
Hermitian (conjugate symmetric) array.
eigvalsh : eigenvalues of a real symmetric or complex Hermitian
(conjugate symmetric) array.
scipy.linalg.eig : Similar function in SciPy that also solves the
generalized eigenvalue problem.
scipy.linalg.schur : Best choice for unitary and other non-Hermitian
normal matrices.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent and `a` can be diagonalized by
a similarity transformation using `v`, i.e, ``inv(v) @ a @ v`` is diagonal.
For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur`
is preferred because the matrix `v` is guaranteed to be unitary, which is
not the case when using `eig`. The Schur factorization produces an
upper triangular matrix rather than a diagonal matrix, but for normal
matrices only the diagonal of the upper triangular matrix is needed, the
rest is roundoff error.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``y.T @ a = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([1., 2., 3.])
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([1.+1.j, 1.-1.j])
array([[0.70710678+0.j , 0.70710678-0.j ],
[0. -0.70710678j, 0. +0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that ``a.conj().T == a``, i.e., `a` is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([2.+0.j, 0.+0.j])
array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary
[ 0.70710678+0.j , -0. +0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([1., 1.])
array([[1., 0.],
[0., 1.]])
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
_assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a complex Hermitian
(conjugate symmetric) or a real symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian or real symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
scipy.linalg.eigh : Similar function in SciPy (but also solves the
generalized eigenvalue problem).
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
``_heevd``.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary
[ 0. +0.89442719j, 0. -0.4472136j ]])
array([[ 0.89442719+0.j , -0. +0.4472136j],
[-0. +0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
return (a,)
@array_function_dispatch(_svd_dispatcher)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
"""
Singular Value Decomposition.
When `a` is a 2D array, and ``full_matrices=False``, then it is
factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where
`u` and the Hermitian transpose of `vh` are 2D arrays with
orthonormal columns and `s` is a 1D array of `a`'s singular
values. When `a` is higher-dimensional, SVD is applied in
stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
s : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
See Also
--------
scipy.linalg.svd : Similar function in SciPy.
scipy.linalg.svdvals : Compute singular values of a matrix.
Notes
-----
.. versionchanged:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
    function ``np.matmul`` for Python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
>>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
Reconstruction based on full SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(u[:, :6] * s, vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(u * s, vh))
True
>>> smat = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on full SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u, s[..., None] * vh))
True
"""
import numpy as _nx
a, wrap = _makearray(a)
if hermitian:
# note: lapack svd returns eigenvalues with s ** 2 sorted descending,
# but eig returns s sorted ascending, so we re-order the eigenvalues
# and related arrays to have the correct order
if compute_uv:
s, u = eigh(a)
sgn = sign(s)
s = abs(s)
sidx = argsort(s)[..., ::-1]
sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
s = _nx.take_along_axis(s, sidx, axis=-1)
u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = transpose(u * sgn[..., None, :]).conjugate()
return wrap(u), s, wrap(vt)
else:
s = eigvalsh(a)
s = s[..., ::-1]
s = abs(s)
return sort(s)[..., ::-1]
_assert_stacked_2d(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vh = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
return wrap(u), s, wrap(vh)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def _cond_dispatcher(x, p=None):
return (x,)
@array_function_dispatch(_cond_dispatcher)
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm used in the condition number computation:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the `numpy.inf` object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746 # may vary
>>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False))
0.70710678118654746 # may vary
"""
x = asarray(x) # in case we have a matrix
if _is_empty_2d(x):
raise LinAlgError("cond is not defined on empty arrays")
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
if p == -2:
r = s[..., -1] / s[..., 0]
else:
r = s[..., 0] / s[..., -1]
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
_assert_stacked_2d(x)
_assert_stacked_square(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
invx = _umath_linalg.inv(x, signature=signature)
r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
r = r.astype(result_t, copy=False)
# Convert nans to infs unless the original array had nan entries
r = asarray(r)
nan_mask = isnan(r)
if nan_mask.any():
nan_mask &= ~isnan(x).any(axis=(-2, -1))
if r.ndim > 0:
r[nan_mask] = Inf
elif nan_mask:
r[()] = Inf
# Convention is to return scalars instead of 0d arrays
if r.ndim == 0:
r = r[()]
return r
def _matrix_rank_dispatcher(A, tol=None, hermitian=None):
return (A,)
@array_function_dispatch(_matrix_rank_dispatcher)
def matrix_rank(A, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of singular values of the array that are
greater than `tol`.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
A : {(M,), (..., M, N)} array_like
Input vector or stack of matrices.
tol : (...) array_like, float, optional
Threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M, N) * eps``.
.. versionchanged:: 1.14
Broadcasted against the stack of matrices
hermitian : bool, optional
If True, `A` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.14
Returns
-------
rank : (...) array_like
Rank of A.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `A`. By default, we identify singular values less
than ``S.max() * max(M, N) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `A` that is an exact (in floating point) linear combination of other
columns in `A`. Computing the SVD on `A` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `A` rank deficient even if
the linear combination of some columns of `A` is not exactly equal to
another column of `A` but only numerically very close to another column of
`A`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `A` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
A = asarray(A)
if A.ndim < 2:
return int(not all(A==0))
S = svd(A, compute_uv=False, hermitian=hermitian)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(A.shape[-2:]) * finfo(S.dtype).eps
else:
tol = asarray(tol)[..., newaxis]
return count_nonzero(S > tol, axis=-1)
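# Illustrative aside for the Notes above (assumed toy data): using the
# alternative Numerical Recipes threshold as an explicit `tol`:
#   A = np.random.randn(5, 3) @ np.random.randn(3, 8)   # rank 3 by construction
#   S = np.linalg.svd(A, compute_uv=False)
#   m, n = A.shape
#   tol = S.max() * np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)
#   matrix_rank(A, tol=tol)        # -> 3 (with high probability)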
# Generalized inverse
def _pinv_dispatcher(a, rcond=None, hermitian=None):
return (a,)
@array_function_dispatch(_pinv_dispatcher)
def pinv(a, rcond=1e-15, hermitian=False):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
a : (..., M, N) array_like
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
Singular values less than or equal to
``rcond * largest_singular_value`` are set to zero.
Broadcasts against the stack of matrices.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
See Also
--------
scipy.linalg.pinv : Similar function in SciPy.
scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
Hermitian matrix.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values (followed, typically, by
zeros), and :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
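The construction from the Notes can be checked directly (a sketch that
ignores the `rcond` cutoff, which is safe here because this `a` has no
tiny singular values):
>>> u, s, vt = np.linalg.svd(a, full_matrices=False)
>>> np.allclose(B, vt.T @ np.diag(1/s) @ u.T)
True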
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _is_empty_2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
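# conjugate up front so that the plain transposes below act as
# conjugate (Hermitian) transposes of the SVD factors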
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
large = s > cutoff
s = divide(1, s, where=large, out=s)
s[~large] = 0
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
# Determinant
@array_function_dispatch(_unary_dispatcher)
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array; must be square in its last two dimensions.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529) # may vary
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
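Reconstructing the determinant from this pair underflows in float64,
which is exactly the failure mode `slogdet` is meant to sidestep:
>>> sign, logdet = np.linalg.slogdet(np.eye(500) * 0.1)
>>> sign * np.exp(logdet) # exp(-1151.29...) underflows to zero
0.0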
"""
a = asarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
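# gufunc signatures: complex input -> (complex sign, real logdet),
# real input -> (real sign, real logdet)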
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
@array_function_dispatch(_unary_dispatcher)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
scipy.linalg.det : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0 # may vary
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
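As a quick consistency check, the result agrees with `slogdet` on the
stacked example above:
>>> sign, logdet = np.linalg.slogdet(a)
>>> np.allclose(np.linalg.det(a), sign * np.exp(logdet))
True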
"""
a = asarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def _lstsq_dispatcher(a, b, rcond=None):
return (a, b)
@array_function_dispatch(_lstsq_dispatcher)
def lstsq(a, b, rcond="warn"):
r"""
Return the least-squares solution to a linear matrix equation.
Computes the vector `x` that approximately solves the equation
``a @ x = b``. The equation may be under-, well-, or over-determined
(i.e., the number of linearly independent rows of `a` can be less than,
equal to, or greater than its number of linearly independent columns).
If `a` is square and of full rank, then `x` (but for round-off error)
is the "exact" solution of the equation. Else, `x` minimizes the
Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing
solutions, the one with the smallest 2-norm :math:`||x||` is returned.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
.. versionchanged:: 1.14.0
If not set, a FutureWarning is given. The previous default
of ``-1`` will use the machine precision as the `rcond` parameter;
the new default will use the machine precision times `max(M, N)`.
To silence the warning and use the new default, pass ``rcond=None``;
to keep the old behavior, pass ``rcond=-1``.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,), (0,)} ndarray
Sums of squared residuals: Squared Euclidean 2-norm for each column in
``b - a @ x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
See Also
--------
scipy.linalg.lstsq : Similar function in SciPy.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
>>> m, c
(1.0, -0.95) # may vary
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> _ = plt.legend()
>>> plt.show()
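The returned residuals equal the squared 2-norm of the misfit, as
documented above (nonempty here because ``rank == 2`` and ``M > N``):
>>> p, res, rank, sv = np.linalg.lstsq(A, y, rcond=None)
>>> np.allclose(res, np.sum((A @ p - y) ** 2))
True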
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assert_2d(a, b)
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
# Determine default rcond value
if rcond == "warn":
# 2017-08-19, 1.14.0
warnings.warn("`rcond` parameter will change to the default of "
"machine precision times ``max(M, N)`` where M and N "
"are the input matrix dimensions.\n"
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
FutureWarning, stacklevel=3)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * max(n, m)
if m <= n:
gufunc = _umath_linalg.lstsq_m
else:
gufunc = _umath_linalg.lstsq_n
signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
if n_rhs == 0:
# LAPACK can't handle n_rhs == 0, so allocate the array one larger in that axis
b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
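# with no equations (m == 0) the minimum-norm solution is exactly
# zero, so overwrite whatever the gufunc left in x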
if m == 0:
x[...] = 0
if n_rhs == 0:
# remove the item we added
x = x[..., :n_rhs]
resids = resids[..., :n_rhs]
# remove the axis we added
if is_1d:
x = x.squeeze(axis=-1)
# we probably should squeeze resids too, but we can't
# without breaking compatibility.
# as documented
if rank != n or m <= n:
resids = array([], result_real_t)
# coerce output arrays
s = s.astype(result_real_t, copy=False)
resids = resids.astype(result_real_t, copy=False)
x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed
return wrap(x), wrap(resids), rank, s
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by `numpy.linalg.norm()`.
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either `numpy.amin`, `numpy.amax`, or `numpy.sum`.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are the minimum, maximum, or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin`, `numpy.amax`, or `numpy.sum`.
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=False), axis=-1)
return result
def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
return (x,)
@array_function_dispatch(_norm_dispatcher)
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
is None. If both `axis` and `ord` are None, the 2-norm of
``x.ravel`` will be returned.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object. The default is None.
axis : {None, int, 2-tuple of ints}, optional.
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
is None.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
See Also
--------
scipy.linalg.norm : Similar function in SciPy.
Notes
-----
For values of ``ord < 1``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
Both the Frobenius and nuclear norm orders are only defined for
matrices and raise a ValueError when ``x.ndim != 2``.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
0.0
>>> LA.norm(b, -2)
1.8570331885190563e-016 # may vary
>>> LA.norm(a, 3)
5.8480354764257312 # may vary
>>> LA.norm(a, -3)
0.0
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
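Two of the table entries can be verified by hand (illustrative checks
using `b` from above):
>>> np.allclose(LA.norm(b, 1), np.abs(b).sum(axis=0).max())
True
>>> np.allclose(LA.norm(b, 'nuc'), np.linalg.svd(b, compute_uv=False).sum())
True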
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
x_real = x.real
x_imag = x.imag
sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag)
else:
sqnorm = x.dot(x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception as e:
raise TypeError("'axis' must be None, an integer or a tuple of integers") from e
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
# None of the str-type keywords for ord ('fro', 'nuc')
# are valid for vectors
elif isinstance(ord, str):
raise ValueError(f"Invalid norm order '{ord}' for vectors")
else:
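# general vector p-norm: (sum(|x|**ord)) ** (1/ord)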
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
ret **= reciprocal(ord, dtype=ret.dtype)
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def _multidot_dispatcher(arrays, *, out=None):
yield from arrays
yield out
@array_function_dispatch(_multidot_dispatcher)
def multi_dot(arrays, *, out=None):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication considerably.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a, b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
.. versionadded:: 1.19.0
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
numpy.dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> _ = multi_dot([A, B, C, D])
instead of::
>>> _ = np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> _ = A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
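These numbers can be reproduced with the ``cost`` helper above (a
quick sketch; the ``np.ones`` arrays stand in for matrices of the
stated shapes):
>>> def cost(A, B):
...     return A.shape[0] * A.shape[1] * B.shape[1]
>>> A, B, C = np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))
>>> cost(A, B) + cost(A @ B, C) # (AB)C
7500
>>> cost(B, C) + cost(A, B @ C) # A(BC)
75000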
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1], out=out)
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assert_2d(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1, out=out)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C, out=None):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`.
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C, out=out)
else:
return dot(A, dot(B, C), out=out)
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`.
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
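For example (an illustrative sketch using the three matrices from the
`multi_dot` docstring):
>>> arrays = [np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))]
>>> s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
>>> m[0, 2] # minimal scalar-multiplication count, i.e. (AB)C
7500.0
>>> s[0, 2] # split at k=1: (A_0 A_1)(A_2)
1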
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j, out=None):
"""Actually do the multiplication with the given order."""
if i == j:
# the initial call with non-None out should never get here
assert out is None
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j),
out=out)
| 89,813 | Python | 30.950907 | 92 | 0.576086 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/__init__.py | """
``numpy.linalg``
================
The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient
low level implementations of standard linear algebra algorithms. Those
libraries may be provided by NumPy itself using C versions of a subset of their
reference implementations but, when possible, highly optimized libraries that
take advantage of specialized processor functionality are preferred. Examples
of such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries
are multithreaded and processor dependent, environment variables and external
packages such as threadpoolctl may be needed to control the number of threads
or specify the processor architecture.
- OpenBLAS: https://www.openblas.net/
- threadpoolctl: https://github.com/joblib/threadpoolctl
Please note that the most-used linear algebra functions in NumPy are present in
the main ``numpy`` namespace rather than in ``numpy.linalg``. These are:
``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``,
``einsum_path`` and ``kron``.
Functions present in numpy.linalg are listed below.
Matrix and vector products
--------------------------
multi_dot
matrix_power
Decompositions
--------------
cholesky
qr
svd
Matrix eigenvalues
------------------
eig
eigh
eigvals
eigvalsh
Norms and other numbers
-----------------------
norm
cond
det
matrix_rank
slogdet
Solving equations and inverting matrices
----------------------------------------
solve
tensorsolve
lstsq
inv
pinv
tensorinv
Exceptions
----------
LinAlgError
"""
# To get sub-modules
from . import linalg
from .linalg import *
__all__ = linalg.__all__.copy()
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| 1,813 | Python | 21.395061 | 79 | 0.685604 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/setup.py | import os
import sys
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
from numpy.distutils.system_info import get_info, system_info
config = Configuration('linalg', parent_package, top_path)
config.add_subpackage('tests')
# Configure lapack_lite
src_dir = 'lapack_lite'
lapack_lite_src = [
os.path.join(src_dir, 'python_xerbla.c'),
os.path.join(src_dir, 'f2c_z_lapack.c'),
os.path.join(src_dir, 'f2c_c_lapack.c'),
os.path.join(src_dir, 'f2c_d_lapack.c'),
os.path.join(src_dir, 'f2c_s_lapack.c'),
os.path.join(src_dir, 'f2c_lapack.c'),
os.path.join(src_dir, 'f2c_blas.c'),
os.path.join(src_dir, 'f2c_config.c'),
os.path.join(src_dir, 'f2c.c'),
]
all_sources = config.paths(lapack_lite_src)
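# prefer a 64-bit-integer (ILP64) LAPACK interface when requested
# through the NPY_USE_BLAS_ILP64 environment variable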
if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
lapack_info = get_info('lapack_ilp64_opt', 2)
else:
lapack_info = get_info('lapack_opt', 0) # notfound_action=0: returns {} if not found
use_lapack_lite = not lapack_info
if use_lapack_lite:
# This makes numpy.distutils write the fact that lapack_lite
# is being used to numpy.__config__
class numpy_linalg_lapack_lite(system_info):
def calc_info(self):
info = {'language': 'c'}
if sys.maxsize > 2**32:
# Build lapack-lite in 64-bit integer mode.
# The suffix is arbitrary (lapack_lite symbols follow it),
# but use the "64_" convention here.
info['define_macros'] = [
('HAVE_BLAS_ILP64', None),
('BLAS_SYMBOL_SUFFIX', '64_')
]
self.set_info(**info)
lapack_info = numpy_linalg_lapack_lite().get_info(2)
def get_lapack_lite_sources(ext, build_dir):
if use_lapack_lite:
print("### Warning: Using unoptimized lapack ###")
return all_sources
else:
if sys.platform == 'win32':
print("### Warning: python_xerbla.c is disabled ###")
return []
return [all_sources[0]]
config.add_extension(
'lapack_lite',
sources=['lapack_litemodule.c', get_lapack_lite_sources],
depends=['lapack_lite/f2c.h'],
extra_info=lapack_info,
)
# umath_linalg module
config.add_extension(
'_umath_linalg',
sources=['umath_linalg.cpp', get_lapack_lite_sources],
depends=['lapack_lite/f2c.h'],
extra_info=lapack_info,
extra_cxx_compile_args=NPY_CXX_FLAGS,
libraries=['npymath'],
)
config.add_data_files('*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| 2,925 | Python | 33.423529 | 78 | 0.565128 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/tests/test_deprecations.py | """Test deprecation and future warnings.
"""
import numpy as np
from numpy.testing import assert_warns
def test_qr_mode_full_future_warning():
"""Check mode='full' FutureWarning.
In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were
deprecated. The release date will probably be sometime in the summer
of 2013.
"""
a = np.eye(2)
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full')
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f')
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic')
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')
| 640 | Python | 29.523808 | 73 | 0.7 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/tests/test_regression.py | """ Test functions for linalg module
"""
import warnings
import numpy as np
from numpy import linalg, arange, float64, array, dot, transpose
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_array_less
)
class TestRegression:
def test_eig_build(self):
# Ticket #652
rva = array([1.03221168e+02 + 0.j,
-1.91843603e+01 + 0.j,
-6.04004526e-01 + 15.84422474j,
-6.04004526e-01 - 15.84422474j,
-1.13692929e+01 + 0.j,
-6.57612485e-01 + 10.41755503j,
-6.57612485e-01 - 10.41755503j,
1.82126812e+01 + 0.j,
1.06011014e+01 + 0.j,
7.80732773e+00 + 0.j,
-7.65390898e-01 + 0.j,
1.51971555e-15 + 0.j,
-1.51308713e-15 + 0.j])
a = arange(13 * 13, dtype=float64)
a.shape = (13, 13)
a = a % 17
va, ve = linalg.eig(a)
va.sort()
rva.sort()
assert_array_almost_equal(va, rva)
def test_eigh_build(self):
# Ticket 662.
rvals = [68.60568999, 89.57756725, 106.67185574]
cov = array([[77.70273908, 3.51489954, 15.64602427],
[3.51489954, 88.97013878, -1.07431931],
[15.64602427, -1.07431931, 98.18223512]])
vals, vecs = linalg.eigh(cov)
assert_array_almost_equal(vals, rvals)
def test_svd_build(self):
# Ticket 627.
a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
m, n = a.shape
u, s, vh = linalg.svd(a)
b = dot(transpose(u[:, n:]), a)
assert_array_almost_equal(b, np.zeros((2, 2)))
def test_norm_vector_badarg(self):
# Regression for #786: Frobenius norm for vectors raises
# ValueError.
assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
def test_lapack_endian(self):
# For bug #1482
a = array([[5.7998084, -2.1825367],
[-2.1825367, 9.85910595]], dtype='>f8')
b = array(a, dtype='<f8')
ap = linalg.cholesky(a)
bp = linalg.cholesky(b)
assert_array_equal(ap, bp)
def test_large_svd_32bit(self):
# See gh-4442, 64bit would require very large/slow matrices.
x = np.eye(1000, 66)
np.linalg.svd(x)
def test_svd_no_uv(self):
# gh-4733
for shape in (3, 4), (4, 4), (4, 3):
for t in float, complex:
a = np.ones(shape, dtype=t)
w = linalg.svd(a, compute_uv=False)
c = np.count_nonzero(np.absolute(w) > 0.5)
assert_equal(c, 1)
assert_equal(np.linalg.matrix_rank(a), 1)
assert_array_less(1, np.linalg.norm(a, ord=2))
def test_norm_object_array(self):
# gh-7575
testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
norm = linalg.norm(testvector)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testvector, ord=1)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype != np.dtype('float64'))
norm = linalg.norm(testvector, ord=2)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
assert_raises(ValueError, linalg.norm, testvector, ord='fro')
assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
assert_raises((AttributeError, DeprecationWarning),
linalg.norm, testvector, ord=0)
assert_raises(ValueError, linalg.norm, testvector, ord=-1)
assert_raises(ValueError, linalg.norm, testvector, ord=-2)
testmatrix = np.array([[np.array([0, 1]), 0, 0],
[0, 0, 0]], dtype=object)
norm = linalg.norm(testmatrix)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testmatrix, ord='fro')
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
def test_lstsq_complex_larger_rhs(self):
# gh-9891
size = 20
n_rhs = 70
G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
b = G.dot(u)
# This should work without segmentation fault.
u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
# check results just in case
assert_array_almost_equal(u_lstsq, u)
| 5,597 | Python | 36.57047 | 76 | 0.55226 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/tests/test_linalg.py | """ Test functions for linalg module
"""
import os
import sys
import itertools
import traceback
import textwrap
import subprocess
import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy.core import swapaxes
from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
def consistent_subclass(out, in_):
# For ndarray subclass input, our output should have the same subclass
# (non-ndarray input gets converted to ndarray).
return type(out) is (type(in_) if isinstance(in_, np.ndarray)
else np.ndarray)
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = single_decimal
else:
decimal = double_decimal
old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
name, the operands a and b, and a set of tags to filter the tests
"""
assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
self.tags = frozenset(tags) # prevent shared tags
def check(self, do):
"""
Run the function `do` on this test case, expanding arguments
"""
do(self.a, self.b, tags=self.tags)
def __repr__(self):
return f'<LinalgCase: {self.name}>'
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
objects)
"""
assert tag in all_tags, "Invalid tag"
for case in cases:
case.tags = case.tags | {tag}
return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
])
# non-square test-cases
CASES += apply_tag('nonsquare', [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(8)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(1)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(5)),
LinalgCase("0x4",
np.random.rand(0, 4),
np.random.rand(0),
tags={'size-0'}),
LinalgCase("4x0",
np.random.rand(4, 0),
np.random.rand(4),
tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
new_cases = []
for case in CASES:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2 * case.a, 3 * case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
return new_cases
CASES += _make_generalized_cases()
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)] * x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
def _make_strided_cases():
new_cases = []
for case in CASES:
for a, a_label in _stride_comb_iter(case.a):
for b, b_label in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b,
tags=case.tags | {'strided'})
new_cases.append(new_case)
return new_cases
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
class LinalgTestCase:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
"""
Run func on each of the cases with all of the tags in require, and none
of the tags in exclude
"""
for case in self.TEST_CASES:
# filter by require and exclude
if case.tags & require != require:
continue
if case.tags & exclude:
continue
try:
case.check(self.do)
except Exception as e:
msg = f'In test case: {case!r}\n\n'
msg += traceback.format_exc()
raise AssertionError(msg) from e
class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
self.check_cases(require={'square'},
exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
self.check_cases(require={'square', 'size-0'},
exclude={'generalized'})
class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
self.check_cases(require={'nonsquare'},
exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
self.check_cases(require={'nonsquare', 'size-0'},
exclude={'generalized'})
class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
self.check_cases(require={'hermitian'},
exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
self.check_cases(require={'hermitian', 'size-0'},
exclude={'generalized'})
class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
self.check_cases(require={'generalized', 'square'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
self.check_cases(require={'generalized', 'square', 'size-0'})
class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian', 'size-0'},
exclude={'none'})
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# kept apart from TestSolve for use in testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(consistent_subclass(x, b))
class TestSolve(SolveCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0, :]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:, :, 0:0]
result = linalg.solve(a, b[:, :, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(consistent_subclass(a_inv, a))
class TestInv(InvCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
class TestEigvals(EigvalsCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation; it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.complex64)
assert_equal((0,), res.shape)
# This is just for documentation; it might make sense to change:
assert_(isinstance(res, np.ndarray))
class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
assert_(consistent_subclass(evectors, a))
class TestEig(EigCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation; it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation; it might make sense to change:
assert_(isinstance(a, np.ndarray))
class SVDBaseTests:
hermitian = False
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
assert_equal(s.dtype, get_real_dtype(dtype))
class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVD(SVDCases, SVDBaseTests):
def test_empty_identity(self):
""" Empty input should put an identity matrix in u or vh """
x = np.empty((4, 0))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (4, 4))
assert_equal(vh.shape, (0, 0))
assert_equal(u, np.eye(4))
x = np.empty((0, 4))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (0, 0))
assert_equal(vh.shape, (4, 4))
assert_equal(vh, np.eye(4))
class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False, hermitian=True)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
def hermitian(mat):
axes = list(range(mat.ndim))
axes[-1], axes[-2] = axes[-2], axes[-1]
return np.conj(np.transpose(mat, axes=axes))
assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))
assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))
assert_equal(np.sort(s)[..., ::-1], s)
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVDHermitian(SVDHermitianCases, SVDBaseTests):
hermitian = True
class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
c = asarray(a) # a might be a matrix
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.cond, c)
return
# +-2 norms
s = linalg.svd(c, compute_uv=False)
assert_almost_equal(
linalg.cond(a), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 2), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -2), s[..., -1] / s[..., 0],
single_decimal=5, double_decimal=11)
# Other norms
cinv = np.linalg.inv(c)
assert_almost_equal(
linalg.cond(a, 1),
abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -1),
abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, np.inf),
abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -np.inf),
abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 'fro'),
np.sqrt((abs(c)**2).sum(-1).sum(-1)
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
assert_almost_equal(linalg.cond(A, inf), 4)
assert_almost_equal(linalg.cond(A, -inf), 2/3)
assert_almost_equal(linalg.cond(A, 1), 4)
assert_almost_equal(linalg.cond(A, -1), 0.5)
assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))
def test_singular(self):
# Singular matrices have infinite condition number for
# positive norms, and negative norms shouldn't raise
# exceptions
As = [np.zeros((2, 2)), np.ones((2, 2))]
p_pos = [None, 1, 2, 'fro']
p_neg = [-1, -2]
for A, p in itertools.product(As, p_pos):
# Inversion may not hit exact infinity, so just check the
# number is large
assert_(linalg.cond(A, p) > 1e15)
for A, p in itertools.product(As, p_neg):
linalg.cond(A, p)
@pytest.mark.xfail(True, run=False,
reason="Platform/LAPACK-dependent failure, "
"see gh-18914")
def test_nan(self):
# nans should be passed through, not converted to infs
ps = [None, 1, -1, 2, -2, 'fro']
p_pos = [None, 1, 2, 'fro']
A = np.ones((2, 2))
A[0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(isinstance(c, np.float_))
assert_(np.isnan(c))
A = np.ones((3, 2, 2))
A[1,0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(np.isnan(c[1]))
if p in p_pos:
assert_(c[0] > 1e15)
assert_(c[2] > 1e15)
else:
assert_(not np.isnan(c[0]))
assert_(not np.isnan(c[2]))
def test_stacked_singular(self):
# Check behavior when only some of the stacked matrices are
# singular
np.random.seed(1234)
A = np.random.rand(2, 2, 2, 2)
A[0,0] = 0
A[1,1] = 0
for p in (None, 1, 2, 'fro', -1, -2):
c = linalg.cond(A, p)
assert_equal(c[0,0], np.inf)
assert_equal(c[1,1], np.inf)
assert_(np.isfinite(c[0,1]))
assert_(np.isfinite(c[1,0]))
class PinvCases(LinalgSquareTestCase,
LinalgNonsquareTestCase,
LinalgGeneralizedSquareTestCase,
LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinv(PinvCases):
pass
class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a, hermitian=True)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinvHermitian(PinvHermitianCases):
pass
class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
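        # det, slogdet and the eigenvalues must all agree:
        # d == s * exp(ld) == prod(eigvals(a)) up to rounding.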
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
def test_0_size(self):
a = np.zeros((0, 0), dtype=np.complex64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.complex64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.complex64)
assert_(res[1].dtype.type is np.float32)
a = np.zeros((0, 0), dtype=np.float64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.float64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.float64)
assert_(res[1].dtype.type is np.float64)
class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, False)
x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
if m == 0:
assert_((x == 0).all())
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (
np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if np.asarray(b).ndim == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(consistent_subclass(x, b))
assert_(consistent_subclass(residuals, b))
class TestLstsq(LstsqCases):
def test_future_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
[1., 0., 1., 0., 0., 4.],
[0., 0., 0., 2., 3., 0.]]).T
b = np.array([1, 0, 0, 0, 0, 0])
with suppress_warnings() as sup:
w = sup.record(FutureWarning, "`rcond` parameter will change")
x, residuals, rank, s = linalg.lstsq(a, b)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
assert_(rank == 3)
# Warning should be raised exactly once (first command)
assert_(len(w) == 1)
@pytest.mark.parametrize(["m", "n", "n_rhs"], [
(4, 2, 2),
(0, 4, 1),
(0, 4, 2),
(4, 0, 1),
(4, 0, 2),
(4, 2, 0),
(0, 0, 0)
])
def test_empty_a_b(self, m, n, n_rhs):
a = np.arange(m * n).reshape(m, n)
b = np.ones((m, n_rhs))
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
if m == 0:
assert_((x == 0).all())
assert_equal(x.shape, (n, n_rhs))
assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
if m > n and n_rhs > 0:
# residuals are exactly the squared norms of b's columns
r = b - np.dot(a, x)
assert_almost_equal(residuals, (r * r).sum(axis=-2))
assert_equal(rank, min(m, n))
assert_equal(s.shape, (min(m, n),))
def test_incompatible_dims(self):
# use modified version of docstring example
x = np.array([0, 1, 2, 3])
y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
A = np.vstack([x, np.ones(len(x))]).T
with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
linalg.lstsq(A, y, rcond=None)
@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
class TestMatrixPower:
rshft_0 = np.eye(4)
rshft_1 = rshft_0[[3, 0, 1, 2]]
rshft_2 = rshft_0[[2, 3, 0, 1]]
rshft_3 = rshft_0[[1, 2, 3, 0]]
rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3]
noninv = array([[1, 0], [0, 0]])
stacked = np.block([[[rshft_0]]]*2)
    # FIXME: the 'e' dtype might work in the future
dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
def test_large_power(self, dt):
rshft = self.rshft_1.astype(dt)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)
def test_power_is_zero(self, dt):
def tz(M):
mz = matrix_power(M, 0)
assert_equal(mz, identity_like_generalized(M))
assert_equal(mz.dtype, M.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_one(self, dt):
def tz(mat):
mz = matrix_power(mat, 1)
assert_equal(mz, mat)
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_two(self, dt):
def tz(mat):
mz = matrix_power(mat, 2)
mmul = matmul if mat.dtype != object else dot
assert_equal(mz, mmul(mat, mat))
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_minus_one(self, dt):
def tz(mat):
invmat = matrix_power(mat, -1)
mmul = matmul if mat.dtype != object else dot
assert_almost_equal(
mmul(invmat, mat), identity_like_generalized(mat))
for mat in self.rshft_all:
if dt not in self.dtnoinv:
tz(mat.astype(dt))
def test_exceptions_bad_power(self, dt):
mat = self.rshft_0.astype(dt)
assert_raises(TypeError, matrix_power, mat, 1.5)
assert_raises(TypeError, matrix_power, mat, [1])
def test_exceptions_non_square(self, dt):
assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)
def test_exceptions_not_invertible(self, dt):
if dt in self.dtnoinv:
return
mat = self.noninv.astype(dt)
assert_raises(LinAlgError, matrix_power, mat, -1)
class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev = linalg.eigvalsh(a, 'L')
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
class TestEigvalsh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w = np.linalg.eigvalsh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w = np.linalg.eigvalsh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w = np.linalg.eigvalsh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w = np.linalg.eigvalsh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w = np.linalg.eigvalsh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w = np.linalg.eigvalsh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
        # This is just for documentation; it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float32)
assert_equal((0,), res.shape)
        # This is just for documentation; it might make sense to change:
assert_(isinstance(res, np.ndarray))
class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev, evc = linalg.eigh(a)
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
assert_allclose(dot_generalized(a, evc),
np.asarray(ev)[..., None, :] * np.asarray(evc),
rtol=get_rtol(ev.dtype))
ev2, evc2 = linalg.eigh(a, 'U')
assert_almost_equal(ev2, evalues)
assert_allclose(dot_generalized(a, evc2),
np.asarray(ev2)[..., None, :] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
class TestEigh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eigh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
assert_equal(v.dtype, dtype)
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigh, x, "lower")
assert_raises(ValueError, np.linalg.eigh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w, v = np.linalg.eigh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w, v = np.linalg.eigh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w, v = np.linalg.eigh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w, v = np.linalg.eigh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w, v = np.linalg.eigh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
        # This is just for documentation; it might make sense to change:
        assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.float32)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
        # This is just for documentation; it might make sense to change:
        assert_(isinstance(res, np.ndarray))
class _TestNormBase:
dt = None
dec = None
@staticmethod
def check_dtype(x, res):
if issubclass(x.dtype.type, np.inexact):
assert_equal(res.dtype, x.real.dtype)
else:
            # For integer input, we don't need to test the float precision of the output.
assert_(issubclass(res.dtype.type, np.floating))
class _TestNormGeneral(_TestNormBase):
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def test_vector_return_type(self):
a = np.array([1, 0, 1])
exact_types = np.typecodes['AllInteger']
inexact_types = np.typecodes['AllFloat']
all_types = exact_types + inexact_types
for each_type in all_types:
at = a.astype(each_type)
an = norm(at, -np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 0.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
self.check_dtype(at, an)
assert_almost_equal(an, 0.0)
an = norm(at, 0)
self.check_dtype(at, an)
assert_almost_equal(an, 2)
an = norm(at, 1)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 2)
self.check_dtype(at, an)
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0))
an = norm(at, 4)
self.check_dtype(at, an)
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0))
an = norm(at, np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 1.0)
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
c = [-1, 2, -3, 4]
def _test(v):
np.testing.assert_almost_equal(norm(v), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, inf), 4.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -inf), 1.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 1), 10.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5),
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 0), 4,
decimal=self.dec)
for v in (a, b, c,):
_test(v)
for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
array(c, dtype=self.dt)):
_test(v)
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
# or column separately.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
assert_almost_equal(norm(A, ord=order, axis=0), expected0)
expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
assert_almost_equal(norm(A, ord=order, axis=1), expected1)
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
nd = B.ndim
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
for axis in itertools.combinations(range(-nd, nd), 2):
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if row_axis == col_axis:
assert_raises(ValueError, norm, B, ord=order, axis=axis)
else:
n = norm(B, ord=order, axis=axis)
# The logic using k_index only works for nd = 3.
# This has to be changed if nd is increased.
k_index = nd - (row_axis + col_axis)
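                    # The three axes are {0, 1, 2} and sum to 3, so the
                    # remaining (stacking) axis is 3 - row_axis - col_axis.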
if row_axis < col_axis:
expected = [norm(B[:].take(k, axis=k_index), ord=order)
for k in range(B.shape[k_index])]
else:
expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
for k in range(B.shape[k_index])]
assert_almost_equal(n, expected)
def test_keepdims(self):
A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
allclose_err = 'order {0}, axis = {1}'
shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'
# check the order=None, axis=None case
expected = norm(A, ord=None, axis=None)
found = norm(A, ord=None, axis=None, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(None, None))
expected_shape = (1, 1, 1)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, None, None))
# Vector norms.
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
for k in range(A.ndim):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
# Matrix norms.
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
for k in itertools.permutations(range(A.ndim), 2):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k[0]] = 1
expected_shape[k[1]] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
class _TestNorm2D(_TestNormBase):
# Define the part for 2d arrays separately, so we can subclass this
# and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
array = np.array
def test_matrix_empty(self):
assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
def test_matrix_return_type(self):
a = self.array([[1, 0, 1], [0, 1, 1]])
exact_types = np.typecodes['AllInteger']
# float32, complex64, float64, complex128 types are the only types
# allowed by `linalg`, which performs the matrix operations used
# within `norm`.
inexact_types = 'fdFD'
all_types = exact_types + inexact_types
for each_type in all_types:
at = a.astype(each_type)
an = norm(at, -np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
self.check_dtype(at, an)
assert_almost_equal(an, 1.0)
an = norm(at, 1)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 2)
self.check_dtype(at, an)
assert_almost_equal(an, 3.0**(1.0/2.0))
an = norm(at, -2)
self.check_dtype(at, an)
assert_almost_equal(an, 1.0)
an = norm(at, np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 'fro')
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 'nuc')
self.check_dtype(at, an)
            # A lower precision bar is needed to support low-precision
            # floats, which end up off by 1 in the 7th decimal place.
np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
def test_matrix_2x2(self):
A = self.array([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84 ** 0.5)
assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 10.0)
assert_almost_equal(norm(A, inf), 12.0)
assert_almost_equal(norm(A, -inf), 4.0)
assert_almost_equal(norm(A, 1), 10.0)
assert_almost_equal(norm(A, -1), 6.0)
assert_almost_equal(norm(A, 2), 9.1231056256176615)
assert_almost_equal(norm(A, -2), 0.87689437438234041)
assert_raises(ValueError, norm, A, 'nofro')
assert_raises(ValueError, norm, A, -3)
assert_raises(ValueError, norm, A, 0)
def test_matrix_3x3(self):
# This test has been added because the 2x2 example
# happened to have equal nuclear norm and induced 1-norm.
# The 1/10 scaling factor accommodates the absolute tolerance
# used in assert_almost_equal.
A = (1 / 10) * \
self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
assert_almost_equal(norm(A, inf), 1.1)
assert_almost_equal(norm(A, -inf), 0.6)
assert_almost_equal(norm(A, 1), 1.0)
assert_almost_equal(norm(A, -1), 0.4)
assert_almost_equal(norm(A, 2), 0.88722940323461277)
assert_almost_equal(norm(A, -2), 0.19456584790481812)
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'`
# or `ord='nuc'` or any other string raises a ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, A, 'nuc', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
assert_raises(ValueError, norm, [3, 4], 'nuc', None)
assert_raises(ValueError, norm, [3, 4], 'test', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(np.AxisError, norm, B, None, 3)
assert_raises(np.AxisError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
class _TestNorm(_TestNorm2D, _TestNormGeneral):
pass
class TestNorm_NonSystematic:
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
# UnboundLocalError.
x = np.arange(10, dtype=np.longdouble)
old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2)
def test_intmin(self):
# Non-regression test: p-norm of signed integer would previously do
# float cast and abs in the wrong order.
x = np.array([-2 ** 31], dtype=np.int32)
old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)
def test_complex_high_ord(self):
# gh-4156
d = np.empty((2,), dtype=np.clongdouble)
d[0] = 6 + 7j
d[1] = -6 + 7j
res = 11.615898132184
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)
d = d.astype(np.complex128)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)
d = d.astype(np.complex64)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
# Separate definitions so we can use them for matrix tests.
class _TestNormDoubleBase(_TestNormBase):
dt = np.double
dec = 12
class _TestNormSingleBase(_TestNormBase):
dt = np.float32
dec = 6
class _TestNormInt64Base(_TestNormBase):
dt = np.int64
dec = 12
class TestNormDouble(_TestNorm, _TestNormDoubleBase):
pass
class TestNormSingle(_TestNorm, _TestNormSingleBase):
pass
class TestNormInt64(_TestNorm, _TestNormInt64Base):
pass
class TestMatrixRank:
def test_matrix_rank(self):
# Full rank matrix
assert_equal(4, matrix_rank(np.eye(4)))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
assert_equal(matrix_rank(I), 3)
# All zeros - zero rank
assert_equal(matrix_rank(np.zeros((4, 4))), 0)
# 1 dimension - rank 1 unless all 0
assert_equal(matrix_rank([1, 0, 0, 0]), 1)
assert_equal(matrix_rank(np.zeros((4,))), 0)
# accepts array-like
assert_equal(matrix_rank([1]), 1)
# greater than 2 dimensions treated as stacked matrices
ms = np.array([I, np.eye(4), np.zeros((4,4))])
assert_equal(matrix_rank(ms), np.array([3, 4, 0]))
# works on scalar
assert_equal(matrix_rank(1), 1)
def test_symmetric_rank(self):
assert_equal(4, matrix_rank(np.eye(4), hermitian=True))
assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True))
assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
assert_equal(3, matrix_rank(I, hermitian=True))
# manually supplied tolerance
I[-1, -1] = 1e-8
assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8))
assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8))
def test_reduced_rank():
# Test matrices with reduced rank
rng = np.random.RandomState(20120714)
for i in range(100):
# Make a rank deficient matrix
X = rng.normal(size=(40, 10))
X[:, 0] = X[:, 1] + X[:, 2]
# Assert that matrix_rank detected deficiency
assert_equal(matrix_rank(X), 9)
X[:, 3] = X[:, 4] + X[:, 5]
assert_equal(matrix_rank(X), 8)
class TestQR:
    # Define the array class here, so we can run these tests on matrices elsewhere.
array = np.array
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape
k = min(m, n)
# mode == 'complete'
q, r = linalg.qr(a, mode='complete')
assert_(q.dtype == a_dtype)
assert_(r.dtype == a_dtype)
assert_(isinstance(q, a_type))
assert_(isinstance(r, a_type))
assert_(q.shape == (m, m))
assert_(r.shape == (m, n))
assert_almost_equal(dot(q, r), a)
assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
assert_almost_equal(np.triu(r), r)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape == (m, k))
assert_(r1.shape == (k, n))
assert_almost_equal(dot(q1, r1), a)
assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
assert_almost_equal(np.triu(r1), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
@pytest.mark.parametrize(["m", "n"], [
(3, 0),
(0, 3),
(0, 0)
])
def test_qr_empty(self, m, n):
k = min(m, n)
a = np.empty((m, n))
self.check_qr(a)
h, tau = np.linalg.qr(a, mode='raw')
assert_equal(h.dtype, np.double)
assert_equal(tau.dtype, np.double)
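        # mode='raw' returns the LAPACK factorization in Fortran order,
        # hence the transposed (n, m) shape for h.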
assert_equal(h.shape, (n, m))
assert_equal(tau.shape, (k,))
def test_mode_raw(self):
# The factorization is not unique and varies between libraries,
# so it is not possible to check against known values. Functional
# testing is a possibility, but awaits the exposure of more
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
# Test double
h, tau = linalg.qr(a, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (2, 3))
assert_(tau.shape == (2,))
h, tau = linalg.qr(a.T, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (3, 2))
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
a = self.array([[1, 2], [3, 4]])
b = self.array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
def check_qr_stacked(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape[-2:]
k = min(m, n)
# mode == 'complete'
q, r = linalg.qr(a, mode='complete')
assert_(q.dtype == a_dtype)
assert_(r.dtype == a_dtype)
assert_(isinstance(q, a_type))
assert_(isinstance(r, a_type))
assert_(q.shape[-2:] == (m, m))
assert_(r.shape[-2:] == (m, n))
assert_almost_equal(matmul(q, r), a)
I_mat = np.identity(q.shape[-1])
stack_I_mat = np.broadcast_to(I_mat,
q.shape[:-2] + (q.shape[-1],)*2)
assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat)
assert_almost_equal(np.triu(r[..., :, :]), r)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape[-2:] == (m, k))
assert_(r1.shape[-2:] == (k, n))
assert_almost_equal(matmul(q1, r1), a)
I_mat = np.identity(q1.shape[-1])
stack_I_mat = np.broadcast_to(I_mat,
q1.shape[:-2] + (q1.shape[-1],)*2)
assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1),
stack_I_mat)
assert_almost_equal(np.triu(r1[..., :, :]), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
@pytest.mark.parametrize("size", [
(3, 4), (4, 3), (4, 4),
(3, 0), (0, 3)])
@pytest.mark.parametrize("outer_size", [
(2, 2), (2,), (2, 3, 4)])
@pytest.mark.parametrize("dt", [
np.single, np.double,
np.csingle, np.cdouble])
def test_stacked_inputs(self, outer_size, size, dt):
A = np.random.normal(size=outer_size + size).astype(dt)
B = np.random.normal(size=outer_size + size).astype(dt)
self.check_qr_stacked(A)
self.check_qr_stacked(A + 1.j*B)
class TestCholesky:
# TODO: are there no other tests for cholesky?
@pytest.mark.parametrize(
'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
)
@pytest.mark.parametrize(
'dtype', (np.float32, np.float64, np.complex64, np.complex128)
)
def test_basic_property(self, shape, dtype):
# Check A = L L^H
np.random.seed(1)
a = np.random.randn(*shape)
if np.issubdtype(dtype, np.complexfloating):
a = a + 1j*np.random.randn(*shape)
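        # Form a = A^H A, which is Hermitian and (almost surely, for random
        # A) positive definite, so the Cholesky factorization exists.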
t = list(range(len(shape)))
t[-2:] = -1, -2
a = np.matmul(a.transpose(t).conj(), a)
a = np.asarray(a, dtype=dtype)
c = np.linalg.cholesky(a)
b = np.matmul(c, c.transpose(t).conj())
assert_allclose(b, a,
err_msg=f'{shape} {dtype}\n{a}\n{c}',
atol=500 * a.shape[0] * np.finfo(dtype).eps)
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.cholesky(a)
assert_equal(a.shape, res.shape)
assert_(res.dtype.type is np.float64)
        # for documentation purposes:
assert_(isinstance(res, np.ndarray))
a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.cholesky(a)
assert_equal(a.shape, res.shape)
assert_(res.dtype.type is np.complex64)
assert_(isinstance(res, np.ndarray))
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
native = '<'
else:
native = '>'
for dtt in (np.float32, np.float64):
arr = np.eye(4, dtype=dtt)
n_arr = arr.newbyteorder(native)
sw_arr = arr.newbyteorder('S').byteswap()
assert_equal(arr.dtype.byteorder, '=')
for routine in (linalg.inv, linalg.det, linalg.pinv):
# Normal call
res = routine(arr)
# Native but not '='
assert_array_equal(res, routine(n_arr))
# Swapped
assert_array_equal(res, routine(sw_arr))
def test_generalized_raise_multiloop():
# It should raise an error even if the error doesn't occur in the
# last iteration of the ufunc inner loop
invertible = np.array([[1, 2], [3, 4]])
non_invertible = np.array([[1, 1], [1, 1]])
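    # The [1::2] slice makes the operand non-contiguous, forcing the gufunc
    # to take the multi-iteration inner-loop path; the singular matrix sits
    # in the first iteration, not the last.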
x = np.zeros([4, 4, 2, 2])[1::2]
x[...] = invertible
x[0, 0] = non_invertible
assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
def test_xerbla_override():
# Check that our xerbla has been successfully linked in. If it is not,
# the default xerbla routine is called, which prints a message to stdout
# and may, or may not, abort the process depending on the LAPACK package.
XERBLA_OK = 255
try:
pid = os.fork()
except (OSError, AttributeError):
# fork failed, or not running on POSIX
pytest.skip("Not POSIX or fork failed.")
if pid == 0:
# child; close i/o file handles
os.close(1)
os.close(0)
# Avoid producing core files.
import resource
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# These calls may abort.
try:
np.linalg.lapack_lite.xerbla()
except ValueError:
pass
except Exception:
os._exit(os.EX_CONFIG)
try:
a = np.array([[1.]])
np.linalg.lapack_lite.dorgqr(
1, 1, 1, a,
0, # <- invalid value
a, a, 0, 0)
except ValueError as e:
if "DORGQR parameter number 5" in str(e):
                # Success: exit with the sentinel code, because a FORTRAN
                # STOP from the default xerbla also exits with status 0.
os._exit(XERBLA_OK)
# Did not abort, but our xerbla was not linked in.
os._exit(os.EX_CONFIG)
else:
# parent
pid, status = os.wait()
if os.WEXITSTATUS(status) != XERBLA_OK:
pytest.skip('Numpy xerbla not linked in.')
@pytest.mark.slow
def test_sdot_bug_8577():
    # Regression test that loading certain other libraries does not
    # lead to wrong results in float32 linear algebra.
#
# There's a bug gh-8577 on OSX that can trigger this, and perhaps
# there are also other situations in which it occurs.
#
# Do the check in a separate process.
bad_libs = ['PyQt5.QtWidgets', 'IPython']
template = textwrap.dedent("""
import sys
{before}
try:
import {bad_lib}
except ImportError:
sys.exit(0)
{after}
x = np.ones(2, dtype=np.float32)
sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
""")
for bad_lib in bad_libs:
code = template.format(before="import numpy as np", after="",
bad_lib=bad_lib)
subprocess.check_call([sys.executable, "-c", code])
# Swapped import order
code = template.format(after="import numpy as np", before="",
bad_lib=bad_lib)
subprocess.check_call([sys.executable, "-c", code])
class TestMultiDot:
def test_basic_function_with_three_arguments(self):
        # multi_dot with three arguments uses a fast hand-coded algorithm to
# determine the optimal order. Therefore test it separately.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
def test_basic_function_with_two_arguments(self):
# separate code path with two arguments
A = np.random.random((6, 2))
B = np.random.random((2, 6))
assert_almost_equal(multi_dot([A, B]), A.dot(B))
assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
def test_basic_function_with_dynamic_programming_optimization(self):
        # multi_dot with four or more arguments uses the dynamic programming
        # optimization and therefore deserves a separate test
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 1))
assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
def test_vector_as_first_argument(self):
# The first argument can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 2))
# the result should be 1-D
assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
def test_vector_as_last_argument(self):
# The last argument can be 1-D
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be 1-D
assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
def test_vector_as_first_and_last_argument(self):
# The first and last arguments can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be a scalar
assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
def test_three_arguments_and_out(self):
        # multi_dot with three arguments uses a fast hand-coded algorithm to
# determine the optimal order. Therefore test it separately.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
out = np.zeros((6, 2))
ret = multi_dot([A, B, C], out=out)
assert out is ret
assert_almost_equal(out, A.dot(B).dot(C))
assert_almost_equal(out, np.dot(A, np.dot(B, C)))
def test_two_arguments_and_out(self):
# separate code path with two arguments
A = np.random.random((6, 2))
B = np.random.random((2, 6))
out = np.zeros((6, 6))
ret = multi_dot([A, B], out=out)
assert out is ret
assert_almost_equal(out, A.dot(B))
assert_almost_equal(out, np.dot(A, B))
def test_dynamic_programming_optimization_and_out(self):
        # multi_dot with four or more arguments uses the dynamic programming
        # optimization and therefore deserves a separate test
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 1))
out = np.zeros((6, 1))
ret = multi_dot([A, B, C, D], out=out)
assert out is ret
assert_almost_equal(out, A.dot(B).dot(C).dot(D))
def test_dynamic_programming_logic(self):
# Test for the dynamic programming part
# This test is directly taken from Cormen page 376.
arrays = [np.random.random((30, 35)),
np.random.random((35, 15)),
np.random.random((15, 5)),
np.random.random((5, 10)),
np.random.random((10, 20)),
np.random.random((20, 25))]
m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.],
[0., 0., 2625., 4375., 7125., 10500.],
[0., 0., 0., 750., 2500., 5375.],
[0., 0., 0., 0., 1000., 3500.],
[0., 0., 0., 0., 0., 5000.],
[0., 0., 0., 0., 0., 0.]])
s_expected = np.array([[0, 1, 1, 3, 3, 3],
[0, 0, 2, 3, 3, 3],
[0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 4, 5],
[0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 0]], dtype=int)
s_expected -= 1 # Cormen uses 1-based index, python does not.
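        # In Cormen's notation the optimal parenthesization encoded by
        # s_expected is ((A1 (A2 A3)) ((A4 A5) A6)), with minimum cost
        # m[1, 6] == 15125 scalar multiplications.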
s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
# Only the upper triangular part (without the diagonal) is interesting.
assert_almost_equal(np.triu(s[:-1, 1:]),
np.triu(s_expected[:-1, 1:]))
assert_almost_equal(np.triu(m), np.triu(m_expected))
def test_too_few_input_arrays(self):
assert_raises(ValueError, multi_dot, [])
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
class TestTensorinv:
@pytest.mark.parametrize("arr, ind", [
(np.ones((4, 6, 8, 2)), 2),
(np.ones((3, 3, 2)), 1),
])
def test_non_square_handling(self, arr, ind):
with assert_raises(LinAlgError):
linalg.tensorinv(arr, ind=ind)
@pytest.mark.parametrize("shape, ind", [
# examples from docstring
((4, 6, 8, 3), 2),
((24, 8, 3), 1),
])
def test_tensorinv_shape(self, shape, ind):
a = np.eye(24)
a.shape = shape
ainv = linalg.tensorinv(a=a, ind=ind)
expected = a.shape[ind:] + a.shape[:ind]
actual = ainv.shape
assert_equal(actual, expected)
@pytest.mark.parametrize("ind", [
0, -2,
])
def test_tensorinv_ind_limit(self, ind):
a = np.eye(24)
a.shape = (4, 6, 8, 3)
with assert_raises(ValueError):
linalg.tensorinv(a=a, ind=ind)
def test_tensorinv_result(self):
# mimic a docstring example
a = np.eye(24)
a.shape = (24, 8, 3)
ainv = linalg.tensorinv(a, ind=1)
b = np.ones(24)
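        # Contracting the ind=1 inverse with b must reproduce the solution
        # of the corresponding tensor equation a . x = b.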
assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
class TestTensorsolve:
@pytest.mark.parametrize("a, axes", [
(np.ones((4, 6, 8, 2)), None),
(np.ones((3, 3, 2)), (0, 2)),
])
def test_non_square_handling(self, a, axes):
with assert_raises(LinAlgError):
b = np.ones(a.shape[:2])
linalg.tensorsolve(a, b, axes=axes)
@pytest.mark.parametrize("shape",
[(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)],
)
def test_tensorsolve_result(self, shape):
a = np.random.randn(*shape)
b = np.ones(a.shape[:2])
x = np.linalg.tensorsolve(a, b)
assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b)
def test_unsupported_commontype():
# linalg gracefully handles unsupported type
arr = np.array([[1, -2], [2, 5]], dtype='float16')
with assert_raises_regex(TypeError, "unsupported in linalg"):
linalg.cholesky(arr)
#@pytest.mark.slow
#@pytest.mark.xfail(not HAS_LAPACK64, run=False,
# reason="Numpy not compiled with 64-bit BLAS/LAPACK")
#@requires_memory(free_bytes=16e9)
@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing")
def test_blas64_dot():
n = 2**32
a = np.zeros([1, n], dtype=np.float32)
b = np.ones([1, 1], dtype=np.float32)
a[0,-1] = 1
c = np.dot(b, a)
assert_equal(c[0,-1], 1)
@pytest.mark.xfail(not HAS_LAPACK64,
reason="Numpy not compiled with 64-bit BLAS/LAPACK")
def test_blas64_geqrf_lwork_smoketest():
# Smoke test LAPACK geqrf lwork call with 64-bit integers
dtype = np.float64
lapack_routine = np.linalg.lapack_lite.dgeqrf
m = 2**32 + 1
n = 2**32 + 1
lda = m
    # Dummy arrays, not referenced by the LAPACK routine during a size
    # query, so they don't need to have the right size
a = np.zeros([1, 1], dtype=dtype)
work = np.zeros([1], dtype=dtype)
tau = np.zeros([1], dtype=dtype)
# Size query
results = lapack_routine(m, n, a, lda, tau, work, -1, 0)
assert_equal(results['info'], 0)
assert_equal(results['m'], m)
    assert_equal(results['n'], n)
    # Should produce an integer of a reasonable size
lwork = int(work.item())
assert_(2**32 < lwork < 2**42)
| 77,352 | Python | 34.41804 | 106 | 0.539624 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_add_docstring.py | """A module for creating docstrings for sphinx ``data`` domains."""
import re
import textwrap
from ._generic_alias import NDArray
_docstrings_list = []
def add_newdoc(name: str, value: str, doc: str) -> None:
"""Append ``_docstrings_list`` with a docstring for `name`.
Parameters
----------
name : str
The name of the object.
value : str
A string-representation of the object.
doc : str
The docstring of the object.
"""
_docstrings_list.append((name, value, doc))
def _parse_docstrings() -> str:
"""Convert all docstrings in ``_docstrings_list`` into a single
sphinx-legible text block.
"""
type_list_ret = []
for name, value, doc in _docstrings_list:
s = textwrap.dedent(doc).replace("\n", "\n ")
# Replace sections by rubrics
lines = s.split("\n")
new_lines = []
indent = ""
for line in lines:
m = re.match(r'^(\s+)[-=]+\s*$', line)
if m and new_lines:
prev = textwrap.dedent(new_lines.pop())
if prev == "Examples":
indent = ""
new_lines.append(f'{m.group(1)}.. rubric:: {prev}')
else:
indent = 4 * " "
new_lines.append(f'{m.group(1)}.. admonition:: {prev}')
new_lines.append("")
else:
new_lines.append(f"{indent}{line}")
s = "\n".join(new_lines)
s_block = f""".. data:: {name}\n :value: {value}\n {s}"""
type_list_ret.append(s_block)
return "\n".join(type_list_ret)
add_newdoc('ArrayLike', 'typing.Union[...]',
"""
A `~typing.Union` representing objects that can be coerced
into an `~numpy.ndarray`.
Among others this includes the likes of:
* Scalars.
* (Nested) sequences.
* Objects implementing the `~class.__array__` protocol.
.. versionadded:: 1.20
See Also
--------
:term:`array_like`:
Any scalar or sequence that can be interpreted as an ndarray.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> def as_array(a: npt.ArrayLike) -> np.ndarray:
... return np.array(a)
""")
add_newdoc('DTypeLike', 'typing.Union[...]',
"""
A `~typing.Union` representing objects that can be coerced
into a `~numpy.dtype`.
Among others this includes the likes of:
* :class:`type` objects.
* Character codes or the names of :class:`type` objects.
* Objects with the ``.dtype`` attribute.
.. versionadded:: 1.20
See Also
--------
:ref:`Specifying and constructing data types <arrays.dtypes.constructing>`
A comprehensive overview of all objects that can be coerced
into data types.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> def as_dtype(d: npt.DTypeLike) -> np.dtype:
... return np.dtype(d)
""")
add_newdoc('NDArray', repr(NDArray),
"""
A :term:`generic <generic type>` version of
`np.ndarray[Any, np.dtype[+ScalarType]] <numpy.ndarray>`.
Can be used during runtime for typing arrays with a given dtype
and unspecified shape.
.. versionadded:: 1.21
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> print(npt.NDArray)
numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]]
>>> print(npt.NDArray[np.float64])
numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
>>> NDArrayInt = npt.NDArray[np.int_]
>>> a: NDArrayInt = np.arange(10)
>>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]:
... return np.array(a)
""")
_docstrings = _parse_docstrings()
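# ``_docstrings`` is consumed by ``numpy.typing``, which appends it to its
# module docstring so that Sphinx renders one ``.. data::`` entry per alias.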
| 3,925 | Python | 24.660131 | 78 | 0.551338 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_nbit.py | """A module with the precisions of platform-specific `~numpy.number`s."""
from typing import Any
# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin
_NBitByte = Any
_NBitShort = Any
_NBitIntC = Any
_NBitIntP = Any
_NBitInt = Any
_NBitLongLong = Any
_NBitHalf = Any
_NBitSingle = Any
_NBitDouble = Any
_NBitLongDouble = Any
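# When numpy's mypy plugin is enabled, each alias above is substituted with a
# concrete ``numpy.typing.NBitBase`` subclass matching the platform; without
# the plugin the aliases stay ``Any`` and precision information is dropped.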
| 345 | Python | 19.35294 | 73 | 0.727536 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_dtype_like.py | from typing import (
Any,
List,
Sequence,
Tuple,
Union,
Type,
TypeVar,
Protocol,
TypedDict,
runtime_checkable,
)
import numpy as np
from ._shape import _ShapeLike
from ._generic_alias import _DType as DType
from ._char_codes import (
_BoolCodes,
_UInt8Codes,
_UInt16Codes,
_UInt32Codes,
_UInt64Codes,
_Int8Codes,
_Int16Codes,
_Int32Codes,
_Int64Codes,
_Float16Codes,
_Float32Codes,
_Float64Codes,
_Complex64Codes,
_Complex128Codes,
_ByteCodes,
_ShortCodes,
_IntCCodes,
_IntPCodes,
_IntCodes,
_LongLongCodes,
_UByteCodes,
_UShortCodes,
_UIntCCodes,
_UIntPCodes,
_UIntCodes,
_ULongLongCodes,
_HalfCodes,
_SingleCodes,
_DoubleCodes,
_LongDoubleCodes,
_CSingleCodes,
_CDoubleCodes,
_CLongDoubleCodes,
_DT64Codes,
_TD64Codes,
_StrCodes,
_BytesCodes,
_VoidCodes,
_ObjectCodes,
)
_SCT = TypeVar("_SCT", bound=np.generic)
_DType_co = TypeVar("_DType_co", covariant=True, bound=DType[Any])
_DTypeLikeNested = Any # TODO: wait for support for recursive types
# Mandatory keys
class _DTypeDictBase(TypedDict):
names: Sequence[str]
formats: Sequence[_DTypeLikeNested]
# Mandatory + optional keys
class _DTypeDict(_DTypeDictBase, total=False):
# Only `str` elements are usable as indexing aliases,
# but `titles` can in principle accept any object
offsets: Sequence[int]
titles: Sequence[Any]
itemsize: int
aligned: bool
# A protocol for anything with the dtype attribute
@runtime_checkable
class _SupportsDType(Protocol[_DType_co]):
@property
def dtype(self) -> _DType_co: ...
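# Runtime sketch (illustrative only): because the protocol is
# ``runtime_checkable``, the bare (unparameterized) class works with
# ``isinstance``, and any object exposing a ``dtype`` attribute passes::
#
#     class _HasDType:
#         dtype = np.dtype(np.float64)
#
#     assert isinstance(_HasDType(), _SupportsDType)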
# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic`
_DTypeLike = Union[
"np.dtype[_SCT]",
Type[_SCT],
_SupportsDType["np.dtype[_SCT]"],
]
# Would create a dtype[np.void]
_VoidDTypeLike = Union[
# (flexible_dtype, itemsize)
Tuple[_DTypeLikeNested, int],
# (fixed_dtype, shape)
Tuple[_DTypeLikeNested, _ShapeLike],
# [(field_name, field_dtype, field_shape), ...]
#
# The type here is quite broad because NumPy accepts quite a wide
# range of inputs inside the list; see the tests for some
# examples.
List[Any],
# {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ...,
# 'itemsize': ...}
_DTypeDict,
# (base_dtype, new_dtype)
Tuple[_DTypeLikeNested, _DTypeLikeNested],
]
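# Illustrative (assumed) examples matched by `_VoidDTypeLike`:
# ``("u4", (2, 2))``, ``[("x", np.float64), ("y", np.float64)]`` and
# ``{"names": ["x"], "formats": [np.float64]}`` are all valid arguments
# to ``np.dtype``.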
# Anything that can be coerced into numpy.dtype.
# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
DTypeLike = Union[
DType[Any],
# default data type (float64)
None,
# array-scalar types and generic types
Type[Any], # NOTE: We're stuck with `Type[Any]` due to object dtypes
# anything with a dtype attribute
_SupportsDType[DType[Any]],
# character codes, type strings or comma-separated fields, e.g., 'float64'
str,
_VoidDTypeLike,
]
# NOTE: while it is possible to provide the dtype as a dict of
# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`),
# this syntax is officially discouraged and
# therefore not included in the Union defining `DTypeLike`.
#
# See https://github.com/numpy/numpy/issues/16891 for more details.
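# A few illustrative values all accepted as ``DTypeLike``: ``None`` (the
# default float64), ``np.int64``, ``np.dtype("i4")``, ``"float64"``, and
# any object with a ``dtype`` attribute, such as an ndarray.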
# Aliases for commonly used dtype-like objects.
# Note that the precision of `np.number` subclasses is ignored herein.
_DTypeLikeBool = Union[
Type[bool],
Type[np.bool_],
DType[np.bool_],
_SupportsDType[DType[np.bool_]],
_BoolCodes,
]
_DTypeLikeUInt = Union[
Type[np.unsignedinteger],
DType[np.unsignedinteger],
_SupportsDType[DType[np.unsignedinteger]],
_UInt8Codes,
_UInt16Codes,
_UInt32Codes,
_UInt64Codes,
_UByteCodes,
_UShortCodes,
_UIntCCodes,
_UIntPCodes,
_UIntCodes,
_ULongLongCodes,
]
_DTypeLikeInt = Union[
Type[int],
Type[np.signedinteger],
DType[np.signedinteger],
_SupportsDType[DType[np.signedinteger]],
_Int8Codes,
_Int16Codes,
_Int32Codes,
_Int64Codes,
_ByteCodes,
_ShortCodes,
_IntCCodes,
_IntPCodes,
_IntCodes,
_LongLongCodes,
]
_DTypeLikeFloat = Union[
Type[float],
Type[np.floating],
DType[np.floating],
_SupportsDType[DType[np.floating]],
_Float16Codes,
_Float32Codes,
_Float64Codes,
_HalfCodes,
_SingleCodes,
_DoubleCodes,
_LongDoubleCodes,
]
_DTypeLikeComplex = Union[
Type[complex],
Type[np.complexfloating],
DType[np.complexfloating],
_SupportsDType[DType[np.complexfloating]],
_Complex64Codes,
_Complex128Codes,
_CSingleCodes,
_CDoubleCodes,
_CLongDoubleCodes,
]
_DTypeLikeDT64 = Union[
    Type[np.datetime64],
    DType[np.datetime64],
    _SupportsDType[DType[np.datetime64]],
    _DT64Codes,
]
_DTypeLikeTD64 = Union[
    Type[np.timedelta64],
    DType[np.timedelta64],
    _SupportsDType[DType[np.timedelta64]],
    _TD64Codes,
]
_DTypeLikeStr = Union[
Type[str],
Type[np.str_],
DType[np.str_],
_SupportsDType[DType[np.str_]],
_StrCodes,
]
_DTypeLikeBytes = Union[
Type[bytes],
Type[np.bytes_],
DType[np.bytes_],
_SupportsDType[DType[np.bytes_]],
_BytesCodes,
]
_DTypeLikeVoid = Union[
Type[np.void],
DType[np.void],
_SupportsDType[DType[np.void]],
_VoidCodes,
_VoidDTypeLike,
]
_DTypeLikeObject = Union[
type,
DType[np.object_],
_SupportsDType[DType[np.object_]],
_ObjectCodes,
]
_DTypeLikeComplex_co = Union[
_DTypeLikeBool,
_DTypeLikeUInt,
_DTypeLikeInt,
_DTypeLikeFloat,
_DTypeLikeComplex,
]
| 5,628 | Python | 21.516 | 78 | 0.649431 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_char_codes.py | from typing import Literal
_BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
_UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
_UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"]
_UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"]
_UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"]
_Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"]
_Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"]
_Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"]
_Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"]
_Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"]
_Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"]
_Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"]
_Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"]
_Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"]
_ByteCodes = Literal["byte", "b", "=b", "<b", ">b"]
_ShortCodes = Literal["short", "h", "=h", "<h", ">h"]
_IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
_IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
_IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
_LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
_UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
_UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
_UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
_UIntCodes = Literal["ulong", "uint", "L", "=L", "<L", ">L"]
_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
_VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
_ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
_DT64Codes = Literal[
"datetime64", "=datetime64", "<datetime64", ">datetime64",
"datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
"datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
"datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
"datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
"datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
"datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
"datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
"datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
"datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
"datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
"datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
"datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
"datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
"M", "=M", "<M", ">M",
"M8", "=M8", "<M8", ">M8",
"M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
"M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
"M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
"M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
"M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
"M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
"M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
"M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
"M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
"M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
"M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
"M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
"M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
]
_TD64Codes = Literal[
"timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
"timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
"timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
"timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
"timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
"timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
"timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
"timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
"timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
"timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
"timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
"timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
"timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
"timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
"m", "=m", "<m", ">m",
"m8", "=m8", "<m8", ">m8",
"m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
"m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
"m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
"m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
"m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
"m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
"m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
"m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
"m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
"m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
"m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
"m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
"m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
]
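# Each alias above enumerates the dtype spellings -- including the
# byte-order-qualified ones -- that ``np.dtype`` accepts for that type;
# e.g. every member of ``_BoolCodes`` satisfies ``np.dtype(code) == np.bool_``.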
| 5,916 | Python | 51.830357 | 94 | 0.505578 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_generic_alias.py | from __future__ import annotations
import sys
import types
from collections.abc import Generator, Iterable, Iterator
from typing import (
Any,
ClassVar,
NoReturn,
TypeVar,
TYPE_CHECKING,
)
import numpy as np
__all__ = ["_GenericAlias", "NDArray"]
_T = TypeVar("_T", bound="_GenericAlias")
def _to_str(obj: object) -> str:
"""Helper function for `_GenericAlias.__repr__`."""
if obj is Ellipsis:
return '...'
elif isinstance(obj, type) and not isinstance(obj, _GENERIC_ALIAS_TYPE):
if obj.__module__ == 'builtins':
return obj.__qualname__
else:
return f'{obj.__module__}.{obj.__qualname__}'
else:
return repr(obj)
def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:
"""Search for all typevars and typevar-containing objects in `args`.
Helper function for `_GenericAlias.__init__`.
"""
for i in args:
if hasattr(i, "__parameters__"):
yield from i.__parameters__
elif isinstance(i, TypeVar):
yield i
def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:
"""Recursively replace all typevars with those from `parameters`.
Helper function for `_GenericAlias.__getitem__`.
"""
args = []
for i in alias.__args__:
if isinstance(i, TypeVar):
value: Any = next(parameters)
elif isinstance(i, _GenericAlias):
value = _reconstruct_alias(i, parameters)
elif hasattr(i, "__parameters__"):
prm_tup = tuple(next(parameters) for _ in i.__parameters__)
value = i[prm_tup]
else:
value = i
args.append(value)
cls = type(alias)
return cls(alias.__origin__, tuple(args), alias.__unpacked__)
class _GenericAlias:
"""A python-based backport of the `types.GenericAlias` class.
E.g. for ``t = list[int]``, ``t.__origin__`` is ``list`` and
``t.__args__`` is ``(int,)``.
See Also
--------
:pep:`585`
The PEP responsible for introducing `types.GenericAlias`.
"""
__slots__ = (
"__weakref__",
"_origin",
"_args",
"_parameters",
"_hash",
"_starred",
)
@property
def __origin__(self) -> type:
return super().__getattribute__("_origin")
@property
def __args__(self) -> tuple[object, ...]:
return super().__getattribute__("_args")
@property
def __parameters__(self) -> tuple[TypeVar, ...]:
"""Type variables in the ``GenericAlias``."""
return super().__getattribute__("_parameters")
@property
def __unpacked__(self) -> bool:
return super().__getattribute__("_starred")
@property
def __typing_unpacked_tuple_args__(self) -> tuple[object, ...] | None:
# NOTE: This should return `__args__` if `__origin__` is a tuple,
# which should never be the case with how `_GenericAlias` is used
# within numpy
return None
def __init__(
self,
origin: type,
args: object | tuple[object, ...],
starred: bool = False,
) -> None:
self._origin = origin
self._args = args if isinstance(args, tuple) else (args,)
self._parameters = tuple(_parse_parameters(self.__args__))
self._starred = starred
@property
def __call__(self) -> type[Any]:
return self.__origin__
def __reduce__(self: _T) -> tuple[
type[_T],
tuple[type[Any], tuple[object, ...], bool],
]:
cls = type(self)
return cls, (self.__origin__, self.__args__, self.__unpacked__)
def __mro_entries__(self, bases: Iterable[object]) -> tuple[type[Any]]:
return (self.__origin__,)
def __dir__(self) -> list[str]:
"""Implement ``dir(self)``."""
cls = type(self)
dir_origin = set(dir(self.__origin__))
return sorted(cls._ATTR_EXCEPTIONS | dir_origin)
def __hash__(self) -> int:
"""Return ``hash(self)``."""
# Attempt to use the cached hash
try:
return super().__getattribute__("_hash")
except AttributeError:
self._hash: int = (
hash(self.__origin__) ^
hash(self.__args__) ^
hash(self.__unpacked__)
)
return super().__getattribute__("_hash")
def __instancecheck__(self, obj: object) -> NoReturn:
"""Check if an `obj` is an instance."""
raise TypeError("isinstance() argument 2 cannot be a "
"parameterized generic")
def __subclasscheck__(self, cls: type) -> NoReturn:
"""Check if a `cls` is a subclass."""
raise TypeError("issubclass() argument 2 cannot be a "
"parameterized generic")
def __repr__(self) -> str:
"""Return ``repr(self)``."""
args = ", ".join(_to_str(i) for i in self.__args__)
origin = _to_str(self.__origin__)
prefix = "*" if self.__unpacked__ else ""
return f"{prefix}{origin}[{args}]"
def __getitem__(self: _T, key: object | tuple[object, ...]) -> _T:
"""Return ``self[key]``."""
key_tup = key if isinstance(key, tuple) else (key,)
if len(self.__parameters__) == 0:
raise TypeError(f"There are no type variables left in {self}")
elif len(key_tup) > len(self.__parameters__):
raise TypeError(f"Too many arguments for {self}")
elif len(key_tup) < len(self.__parameters__):
raise TypeError(f"Too few arguments for {self}")
key_iter = iter(key_tup)
return _reconstruct_alias(self, key_iter)
def __eq__(self, value: object) -> bool:
"""Return ``self == value``."""
if not isinstance(value, _GENERIC_ALIAS_TYPE):
return NotImplemented
return (
self.__origin__ == value.__origin__ and
self.__args__ == value.__args__ and
self.__unpacked__ == getattr(
value, "__unpacked__", self.__unpacked__
)
)
def __iter__(self: _T) -> Generator[_T, None, None]:
"""Return ``iter(self)``."""
cls = type(self)
yield cls(self.__origin__, self.__args__, True)
_ATTR_EXCEPTIONS: ClassVar[frozenset[str]] = frozenset({
"__origin__",
"__args__",
"__parameters__",
"__mro_entries__",
"__reduce__",
"__reduce_ex__",
"__copy__",
"__deepcopy__",
"__unpacked__",
"__typing_unpacked_tuple_args__",
"__class__",
})
def __getattribute__(self, name: str) -> Any:
"""Return ``getattr(self, name)``."""
# Pull the attribute from `__origin__` unless its
# name is in `_ATTR_EXCEPTIONS`
cls = type(self)
if name in cls._ATTR_EXCEPTIONS:
return super().__getattribute__(name)
return getattr(self.__origin__, name)
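# Illustrative sketch of the forwarding above (assumes ``np`` as imported in this
# module): any attribute not listed in ``_ATTR_EXCEPTIONS`` is resolved on the
# origin type, e.g.
#
# >>> _GenericAlias(np.ndarray, (Any,)).__name__  # forwarded to `np.ndarray`
# 'ndarray'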
# See `_GenericAlias.__eq__`
if sys.version_info >= (3, 9):
_GENERIC_ALIAS_TYPE = (_GenericAlias, types.GenericAlias)
else:
_GENERIC_ALIAS_TYPE = (_GenericAlias,)
ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
if TYPE_CHECKING or sys.version_info >= (3, 9):
_DType = np.dtype[ScalarType]
NDArray = np.ndarray[Any, np.dtype[ScalarType]]
else:
_DType = _GenericAlias(np.dtype, (ScalarType,))
NDArray = _GenericAlias(np.ndarray, (Any, _DType))
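# A minimal usage sketch (illustrative only): on interpreters without
# `types.GenericAlias` subscription support, the backport mirrors it, e.g.
#
# >>> NDArray[np.float64]
# numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
# >>> NDArray[np.float64].__origin__ is np.ndarray
# True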
| 7,479 | Python | 29.406504 | 77 | 0.53697 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_scalars.py | from typing import Union, Tuple, Any
import numpy as np
# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
# `np.bytes_` are already subclasses of their builtin counterparts
_CharLike_co = Union[str, bytes]
# The 6 `<X>Like_co` type-aliases below represent all scalars that can be
# coerced into `<X>` (with the casting rule `same_kind`)
_BoolLike_co = Union[bool, np.bool_]
_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger]
_IntLike_co = Union[_BoolLike_co, int, np.integer]
_FloatLike_co = Union[_IntLike_co, float, np.floating]
_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating]
_TD64Like_co = Union[_IntLike_co, np.timedelta64]
_NumberLike_co = Union[int, float, complex, np.number, np.bool_]
_ScalarLike_co = Union[
int,
float,
complex,
str,
bytes,
np.generic,
]
# `_VoidLike_co` is technically not a scalar, but it's close enough
_VoidLike_co = Union[Tuple[Any, ...], np.void]
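# Illustrative sketch (hypothetical helper, not part of the module): the aliases
# are intended for annotating scalar arguments, e.g.
#
# def square(x: _FloatLike_co) -> np.float64:
#     return np.float64(x) ** 2
#
# accepts `True`, `2`, `0.5`, or any NumPy boolean/integer/floating scalar,
# while a `complex` argument would be rejected by a type checker.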
| 957 | Python | 29.903225 | 73 | 0.698015 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/__init__.py | """Private counterpart of ``numpy.typing``."""
from __future__ import annotations
from numpy import ufunc
from numpy.core.overrides import set_module
from typing import TYPE_CHECKING, final
@final # Disallow the creation of arbitrary `NBitBase` subclasses
@set_module("numpy.typing")
class NBitBase:
"""
A type representing `numpy.number` precision during static type checking.
    Used exclusively for the purpose of static type checking, `NBitBase`
represents the base of a hierarchical set of subclasses.
Each subsequent subclass is herein used for representing a lower level
of precision, *e.g.* ``64Bit > 32Bit > 16Bit``.
.. versionadded:: 1.20
Examples
--------
Below is a typical usage example: `NBitBase` is herein used for annotating
a function that takes a float and integer of arbitrary precision
as arguments and returns a new float of whichever precision is largest
(*e.g.* ``np.float16 + np.int64 -> np.float64``).
.. code-block:: python
>>> from __future__ import annotations
>>> from typing import TypeVar, TYPE_CHECKING
>>> import numpy as np
>>> import numpy.typing as npt
>>> T1 = TypeVar("T1", bound=npt.NBitBase)
>>> T2 = TypeVar("T2", bound=npt.NBitBase)
>>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]:
... return a + b
>>> a = np.float16()
>>> b = np.int64()
>>> out = add(a, b)
>>> if TYPE_CHECKING:
... reveal_locals()
... # note: Revealed local types are:
... # note: a: numpy.floating[numpy.typing._16Bit*]
... # note: b: numpy.signedinteger[numpy.typing._64Bit*]
... # note: out: numpy.floating[numpy.typing._64Bit*]
"""
def __init_subclass__(cls) -> None:
allowed_names = {
"NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit",
"_64Bit", "_32Bit", "_16Bit", "_8Bit",
}
if cls.__name__ not in allowed_names:
raise TypeError('cannot inherit from final class "NBitBase"')
super().__init_subclass__()
# Silence errors about subclassing a `@final`-decorated class
class _256Bit(NBitBase): # type: ignore[misc]
pass
class _128Bit(_256Bit): # type: ignore[misc]
pass
class _96Bit(_128Bit): # type: ignore[misc]
pass
class _80Bit(_96Bit): # type: ignore[misc]
pass
class _64Bit(_80Bit): # type: ignore[misc]
pass
class _32Bit(_64Bit): # type: ignore[misc]
pass
class _16Bit(_32Bit): # type: ignore[misc]
pass
class _8Bit(_16Bit): # type: ignore[misc]
pass
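# Note the inverted hierarchy above: *lower* precision subclasses *higher*
# precision, so a typevar bound of e.g. ``_64Bit`` also admits ``_32Bit`` and
# below. Illustrative sketch:
#
# >>> issubclass(_32Bit, _64Bit)
# True
# >>> issubclass(_64Bit, _32Bit)
# False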
from ._nested_sequence import (
_NestedSequence as _NestedSequence,
)
from ._nbit import (
_NBitByte as _NBitByte,
_NBitShort as _NBitShort,
_NBitIntC as _NBitIntC,
_NBitIntP as _NBitIntP,
_NBitInt as _NBitInt,
_NBitLongLong as _NBitLongLong,
_NBitHalf as _NBitHalf,
_NBitSingle as _NBitSingle,
_NBitDouble as _NBitDouble,
_NBitLongDouble as _NBitLongDouble,
)
from ._char_codes import (
_BoolCodes as _BoolCodes,
_UInt8Codes as _UInt8Codes,
_UInt16Codes as _UInt16Codes,
_UInt32Codes as _UInt32Codes,
_UInt64Codes as _UInt64Codes,
_Int8Codes as _Int8Codes,
_Int16Codes as _Int16Codes,
_Int32Codes as _Int32Codes,
_Int64Codes as _Int64Codes,
_Float16Codes as _Float16Codes,
_Float32Codes as _Float32Codes,
_Float64Codes as _Float64Codes,
_Complex64Codes as _Complex64Codes,
_Complex128Codes as _Complex128Codes,
_ByteCodes as _ByteCodes,
_ShortCodes as _ShortCodes,
_IntCCodes as _IntCCodes,
_IntPCodes as _IntPCodes,
_IntCodes as _IntCodes,
_LongLongCodes as _LongLongCodes,
_UByteCodes as _UByteCodes,
_UShortCodes as _UShortCodes,
_UIntCCodes as _UIntCCodes,
_UIntPCodes as _UIntPCodes,
_UIntCodes as _UIntCodes,
_ULongLongCodes as _ULongLongCodes,
_HalfCodes as _HalfCodes,
_SingleCodes as _SingleCodes,
_DoubleCodes as _DoubleCodes,
_LongDoubleCodes as _LongDoubleCodes,
_CSingleCodes as _CSingleCodes,
_CDoubleCodes as _CDoubleCodes,
_CLongDoubleCodes as _CLongDoubleCodes,
_DT64Codes as _DT64Codes,
_TD64Codes as _TD64Codes,
_StrCodes as _StrCodes,
_BytesCodes as _BytesCodes,
_VoidCodes as _VoidCodes,
_ObjectCodes as _ObjectCodes,
)
from ._scalars import (
_CharLike_co as _CharLike_co,
_BoolLike_co as _BoolLike_co,
_UIntLike_co as _UIntLike_co,
_IntLike_co as _IntLike_co,
_FloatLike_co as _FloatLike_co,
_ComplexLike_co as _ComplexLike_co,
_TD64Like_co as _TD64Like_co,
_NumberLike_co as _NumberLike_co,
_ScalarLike_co as _ScalarLike_co,
_VoidLike_co as _VoidLike_co,
)
from ._shape import (
_Shape as _Shape,
_ShapeLike as _ShapeLike,
)
from ._dtype_like import (
DTypeLike as DTypeLike,
_DTypeLike as _DTypeLike,
_SupportsDType as _SupportsDType,
_VoidDTypeLike as _VoidDTypeLike,
_DTypeLikeBool as _DTypeLikeBool,
_DTypeLikeUInt as _DTypeLikeUInt,
_DTypeLikeInt as _DTypeLikeInt,
_DTypeLikeFloat as _DTypeLikeFloat,
_DTypeLikeComplex as _DTypeLikeComplex,
_DTypeLikeTD64 as _DTypeLikeTD64,
_DTypeLikeDT64 as _DTypeLikeDT64,
_DTypeLikeObject as _DTypeLikeObject,
_DTypeLikeVoid as _DTypeLikeVoid,
_DTypeLikeStr as _DTypeLikeStr,
_DTypeLikeBytes as _DTypeLikeBytes,
_DTypeLikeComplex_co as _DTypeLikeComplex_co,
)
from ._array_like import (
ArrayLike as ArrayLike,
_ArrayLike as _ArrayLike,
_FiniteNestedSequence as _FiniteNestedSequence,
_SupportsArray as _SupportsArray,
_SupportsArrayFunc as _SupportsArrayFunc,
_ArrayLikeInt as _ArrayLikeInt,
_ArrayLikeBool_co as _ArrayLikeBool_co,
_ArrayLikeUInt_co as _ArrayLikeUInt_co,
_ArrayLikeInt_co as _ArrayLikeInt_co,
_ArrayLikeFloat_co as _ArrayLikeFloat_co,
_ArrayLikeComplex_co as _ArrayLikeComplex_co,
_ArrayLikeNumber_co as _ArrayLikeNumber_co,
_ArrayLikeTD64_co as _ArrayLikeTD64_co,
_ArrayLikeDT64_co as _ArrayLikeDT64_co,
_ArrayLikeObject_co as _ArrayLikeObject_co,
_ArrayLikeVoid_co as _ArrayLikeVoid_co,
_ArrayLikeStr_co as _ArrayLikeStr_co,
_ArrayLikeBytes_co as _ArrayLikeBytes_co,
_ArrayLikeUnknown as _ArrayLikeUnknown,
_UnknownType as _UnknownType,
)
from ._generic_alias import (
NDArray as NDArray,
_DType as _DType,
_GenericAlias as _GenericAlias,
)
if TYPE_CHECKING:
from ._ufunc import (
_UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1,
_UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1,
_UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2,
_UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2,
_GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1,
)
else:
# Declare the (type-check-only) ufunc subclasses as ufunc aliases during
# runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834)
_UFunc_Nin1_Nout1 = ufunc
_UFunc_Nin2_Nout1 = ufunc
_UFunc_Nin1_Nout2 = ufunc
_UFunc_Nin2_Nout2 = ufunc
_GUFunc_Nin2_Nout1 = ufunc
| 7,108 | Python | 30.455752 | 83 | 0.662915 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_nested_sequence.py | """A module containing the `_NestedSequence` protocol."""
from __future__ import annotations
from typing import (
Any,
Iterator,
overload,
TypeVar,
Protocol,
runtime_checkable,
)
__all__ = ["_NestedSequence"]
_T_co = TypeVar("_T_co", covariant=True)
@runtime_checkable
class _NestedSequence(Protocol[_T_co]):
"""A protocol for representing nested sequences.
Warning
-------
`_NestedSequence` currently does not work in combination with typevars,
    *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``.
See Also
--------
collections.abc.Sequence
ABCs for read-only and mutable :term:`sequences`.
Examples
--------
.. code-block:: python
>>> from __future__ import annotations
>>> from typing import TYPE_CHECKING
>>> import numpy as np
    >>> from numpy._typing import _NestedSequence
    >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]:
... return np.asarray(seq).dtype
>>> a = get_dtype([1.0])
>>> b = get_dtype([[1.0]])
>>> c = get_dtype([[[1.0]]])
>>> d = get_dtype([[[[1.0]]]])
>>> if TYPE_CHECKING:
... reveal_locals()
... # note: Revealed local types are:
... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
"""
def __len__(self, /) -> int:
"""Implement ``len(self)``."""
raise NotImplementedError
@overload
def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: ...
@overload
def __getitem__(self, index: slice, /) -> _NestedSequence[_T_co]: ...
def __getitem__(self, index, /):
"""Implement ``self[x]``."""
raise NotImplementedError
def __contains__(self, x: object, /) -> bool:
"""Implement ``x in self``."""
raise NotImplementedError
def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
"""Implement ``iter(self)``."""
raise NotImplementedError
def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
"""Implement ``reversed(self)``."""
raise NotImplementedError
def count(self, value: Any, /) -> int:
"""Return the number of occurrences of `value`."""
raise NotImplementedError
def index(self, value: Any, /) -> int:
"""Return the first index of `value`."""
raise NotImplementedError
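# Because the protocol is decorated with `runtime_checkable`, structural
# `isinstance` checks work at runtime (only method *presence* is verified,
# not signatures). Illustrative sketch:
#
# >>> isinstance([1.0, [2.0, 3.0]], _NestedSequence)
# True
# >>> isinstance(1.0, _NestedSequence)
# False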
| 2,699 | Python | 28.032258 | 80 | 0.551315 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_array_like.py | from __future__ import annotations
# NOTE: Import `Sequence` from `typing` as it is needed for a type-alias,
# not an annotation
from collections.abc import Collection, Callable
from typing import Any, Sequence, Protocol, Union, TypeVar, runtime_checkable
from numpy import (
ndarray,
dtype,
generic,
bool_,
unsignedinteger,
integer,
floating,
complexfloating,
number,
timedelta64,
datetime64,
object_,
void,
str_,
bytes_,
)
from ._nested_sequence import _NestedSequence
_T = TypeVar("_T")
_ScalarType = TypeVar("_ScalarType", bound=generic)
_DType = TypeVar("_DType", bound="dtype[Any]")
_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]")
# The `_SupportsArray` protocol only cares about the default dtype
# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
# array.
# Concrete implementations of the protocol are responsible for adding
# any and all remaining overloads
@runtime_checkable
class _SupportsArray(Protocol[_DType_co]):
def __array__(self) -> ndarray[Any, _DType_co]: ...
@runtime_checkable
class _SupportsArrayFunc(Protocol):
"""A protocol class representing `~class.__array_function__`."""
def __array_function__(
self,
func: Callable[..., Any],
types: Collection[type[Any]],
args: tuple[Any, ...],
kwargs: dict[str, Any],
) -> object: ...
# TODO: Wait until mypy supports recursive objects in combination with typevars
_FiniteNestedSequence = Union[
_T,
Sequence[_T],
Sequence[Sequence[_T]],
Sequence[Sequence[Sequence[_T]]],
Sequence[Sequence[Sequence[Sequence[_T]]]],
]
# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic`
_ArrayLike = Union[
_SupportsArray["dtype[_ScalarType]"],
_NestedSequence[_SupportsArray["dtype[_ScalarType]"]],
]
# A union representing array-like objects; consists of two typevars:
# One representing types that can be parametrized w.r.t. `np.dtype`
# and another one for the rest
_DualArrayLike = Union[
_SupportsArray[_DType],
_NestedSequence[_SupportsArray[_DType]],
_T,
_NestedSequence[_T],
]
# TODO: support buffer protocols once
#
# https://bugs.python.org/issue27501
#
# is resolved. See also the mypy issue:
#
# https://github.com/python/typing/issues/593
ArrayLike = _DualArrayLike[
dtype,
Union[bool, int, float, complex, str, bytes],
]
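# Illustrative sketch (hypothetical function; assumes `import numpy as np`):
# `ArrayLike` covers everything `np.asarray` accepts by default, e.g.
#
# def as_2d(a: ArrayLike) -> "ndarray[Any, dtype[Any]]":
#     return np.atleast_2d(np.asarray(a))
#
# valid arguments include scalars (`1.0`), strings (`"spam"`), nested
# sequences (`[[1, 2], [3, 4]]`), and objects implementing `__array__`.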
# `ArrayLike<X>_co`: array-like objects that can be coerced into `X`
# given the casting rules `same_kind`
_ArrayLikeBool_co = _DualArrayLike[
"dtype[bool_]",
bool,
]
_ArrayLikeUInt_co = _DualArrayLike[
"dtype[Union[bool_, unsignedinteger[Any]]]",
bool,
]
_ArrayLikeInt_co = _DualArrayLike[
"dtype[Union[bool_, integer[Any]]]",
Union[bool, int],
]
_ArrayLikeFloat_co = _DualArrayLike[
"dtype[Union[bool_, integer[Any], floating[Any]]]",
Union[bool, int, float],
]
_ArrayLikeComplex_co = _DualArrayLike[
"dtype[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]",
Union[bool, int, float, complex],
]
_ArrayLikeNumber_co = _DualArrayLike[
"dtype[Union[bool_, number[Any]]]",
Union[bool, int, float, complex],
]
_ArrayLikeTD64_co = _DualArrayLike[
"dtype[Union[bool_, integer[Any], timedelta64]]",
Union[bool, int],
]
_ArrayLikeDT64_co = Union[
_SupportsArray["dtype[datetime64]"],
_NestedSequence[_SupportsArray["dtype[datetime64]"]],
]
_ArrayLikeObject_co = Union[
_SupportsArray["dtype[object_]"],
_NestedSequence[_SupportsArray["dtype[object_]"]],
]
_ArrayLikeVoid_co = Union[
_SupportsArray["dtype[void]"],
_NestedSequence[_SupportsArray["dtype[void]"]],
]
_ArrayLikeStr_co = _DualArrayLike[
"dtype[str_]",
str,
]
_ArrayLikeBytes_co = _DualArrayLike[
"dtype[bytes_]",
bytes,
]
_ArrayLikeInt = _DualArrayLike[
"dtype[integer[Any]]",
int,
]
# Extra ArrayLike type so that pyright can deal with NDArray[Any]
# Used as the first overload, should only match NDArray[Any],
# not any actual types.
# https://github.com/numpy/numpy/pull/22193
class _UnknownType:
...
_ArrayLikeUnknown = _DualArrayLike[
"dtype[_UnknownType]",
_UnknownType,
]
| 4,212 | Python | 25.496855 | 82 | 0.674739 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_shape.py | from typing import Sequence, Tuple, Union, SupportsIndex
_Shape = Tuple[int, ...]
# Anything that can be coerced to a shape tuple
_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]]
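# Illustrative sketch (assumes `import numpy as np`): both spellings below
# satisfy `_ShapeLike`, mirroring what shape-taking functions accept:
#
# >>> np.zeros(3).shape        # a bare SupportsIndex
# (3,)
# >>> np.zeros((2, 3)).shape   # a Sequence[SupportsIndex]
# (2, 3)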
| 191 | Python | 26.428568 | 58 | 0.759162 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_extended_precision.py | """A module with platform-specific extended precision
`numpy.number` subclasses.
The subclasses are defined here (instead of ``__init__.pyi``) such
that they can be imported conditionally via the numpy's mypy plugin.
"""
from typing import TYPE_CHECKING, Any  # `Any` is needed at runtime below
import numpy as np
from . import (
_80Bit,
_96Bit,
_128Bit,
_256Bit,
)
if TYPE_CHECKING:
uint128 = np.unsignedinteger[_128Bit]
uint256 = np.unsignedinteger[_256Bit]
int128 = np.signedinteger[_128Bit]
int256 = np.signedinteger[_256Bit]
float80 = np.floating[_80Bit]
float96 = np.floating[_96Bit]
float128 = np.floating[_128Bit]
float256 = np.floating[_256Bit]
complex160 = np.complexfloating[_80Bit, _80Bit]
complex192 = np.complexfloating[_96Bit, _96Bit]
complex256 = np.complexfloating[_128Bit, _128Bit]
complex512 = np.complexfloating[_256Bit, _256Bit]
else:
uint128 = Any
uint256 = Any
int128 = Any
int256 = Any
float80 = Any
float96 = Any
float128 = Any
float256 = Any
complex160 = Any
complex192 = Any
complex256 = Any
complex512 = Any
| 1,111 | Python | 24.272727 | 68 | 0.675968 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_scripts.py | """ Test scripts
Test that we can run executable scripts that have been installed with numpy.
"""
import sys
import os
import pytest
from os.path import join as pathjoin, isfile, dirname
import subprocess
import numpy as np
from numpy.testing import assert_equal
is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
def find_f2py_commands():
if sys.platform == 'win32':
exe_dir = dirname(sys.executable)
if exe_dir.endswith('Scripts'): # virtualenv
return [os.path.join(exe_dir, 'f2py')]
else:
return [os.path.join(exe_dir, "Scripts", 'f2py')]
else:
# Three scripts are installed in Unix-like systems:
# 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
# if installed with python3.9 the scripts would be named
# 'f2py', 'f2py3', and 'f2py3.9'.
version = sys.version_info
major = str(version.major)
minor = str(version.minor)
return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor]
@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
@pytest.mark.xfail(reason="Test is unreliable")
@pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
def test_f2py(f2py_cmd):
# test that we can run f2py script
stdout = subprocess.check_output([f2py_cmd, '-v'])
assert_equal(stdout.strip(), np.__version__.encode('ascii'))
def test_pep338():
stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v'])
assert_equal(stdout.strip(), np.__version__.encode('ascii'))
| 1,573 | Python | 32.489361 | 76 | 0.649078 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_matlib.py | import numpy as np
import numpy.matlib
from numpy.testing import assert_array_equal, assert_
def test_empty():
x = numpy.matlib.empty((2,))
assert_(isinstance(x, np.matrix))
    assert_(x.shape == (1, 2))
def test_ones():
assert_array_equal(numpy.matlib.ones((2, 3)),
np.matrix([[ 1., 1., 1.],
[ 1., 1., 1.]]))
assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1., 1.]]))
def test_zeros():
assert_array_equal(numpy.matlib.zeros((2, 3)),
np.matrix([[ 0., 0., 0.],
[ 0., 0., 0.]]))
assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0., 0.]]))
def test_identity():
x = numpy.matlib.identity(2, dtype=int)
assert_array_equal(x, np.matrix([[1, 0], [0, 1]]))
def test_eye():
xc = numpy.matlib.eye(3, k=1, dtype=int)
assert_array_equal(xc, np.matrix([[ 0, 1, 0],
[ 0, 0, 1],
[ 0, 0, 0]]))
assert xc.flags.c_contiguous
assert not xc.flags.f_contiguous
xf = numpy.matlib.eye(3, 4, dtype=int, order='F')
assert_array_equal(xf, np.matrix([[ 1, 0, 0, 0],
[ 0, 1, 0, 0],
[ 0, 0, 1, 0]]))
assert not xf.flags.c_contiguous
assert xf.flags.f_contiguous
def test_rand():
x = numpy.matlib.rand(3)
# check matrix type, array would have shape (3,)
assert_(x.ndim == 2)
def test_randn():
x = np.matlib.randn(3)
# check matrix type, array would have shape (3,)
assert_(x.ndim == 2)
def test_repmat():
a1 = np.arange(4)
x = numpy.matlib.repmat(a1, 2, 2)
y = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
[0, 1, 2, 3, 0, 1, 2, 3]])
assert_array_equal(x, y)
| 1,852 | Python | 30.406779 | 70 | 0.487041 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_reloading.py | from numpy.testing import assert_raises, assert_warns, assert_, assert_equal
from numpy.compat import pickle
import sys
import subprocess
import textwrap
from importlib import reload
def test_numpy_reloading():
# gh-7844. Also check that relevant globals retain their identity.
import numpy as np
import numpy._globals
_NoValue = np._NoValue
VisibleDeprecationWarning = np.VisibleDeprecationWarning
ModuleDeprecationWarning = np.ModuleDeprecationWarning
with assert_warns(UserWarning):
reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
assert_raises(RuntimeError, reload, numpy._globals)
with assert_warns(UserWarning):
reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
def test_novalue():
import numpy as np
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_equal(repr(np._NoValue), '<no value>')
assert_(pickle.loads(pickle.dumps(np._NoValue,
protocol=proto)) is np._NoValue)
def test_full_reimport():
"""At the time of writing this, it is *not* truly supported, but
    apparently enough users rely on it for it to be an annoying change
when it started failing previously.
"""
# Test within a new process, to ensure that we do not mess with the
# global state during the test run (could lead to cryptic test failures).
# This is generally unsafe, especially, since we also reload the C-modules.
code = textwrap.dedent(r"""
import sys
from pytest import warns
import numpy as np
for k in list(sys.modules.keys()):
if "numpy" in k:
del sys.modules[k]
with warns(UserWarning):
import numpy as np
""")
p = subprocess.run([sys.executable, '-c', code], capture_output=True)
if p.returncode:
raise AssertionError(
f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
)
| 2,244 | Python | 33.538461 | 79 | 0.677362 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_public_api.py | import sys
import sysconfig
import subprocess
import pkgutil
import types
import importlib
import warnings
import numpy as np
import numpy
import pytest
try:
import ctypes
except ImportError:
ctypes = None
def check_dir(module, module_name=None):
"""Returns a mapping of all objects with the wrong __module__ attribute."""
if module_name is None:
module_name = module.__name__
results = {}
for name in dir(module):
item = getattr(module, name)
if (hasattr(item, '__module__') and hasattr(item, '__name__')
and item.__module__ != module_name):
results[name] = item.__module__ + '.' + item.__name__
return results
def test_numpy_namespace():
# None of these objects are publicly documented to be part of the main
# NumPy namespace (some are useful though, others need to be cleaned up)
undocumented = {
'Tester': 'numpy.testing._private.nosetester.NoseTester',
'_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
'add_docstring': 'numpy.core._multiarray_umath.add_docstring',
'add_newdoc': 'numpy.core.function_base.add_newdoc',
'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
'byte_bounds': 'numpy.lib.utils.byte_bounds',
'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays',
'deprecate': 'numpy.lib.utils.deprecate',
'deprecate_with_doc': 'numpy.lib.utils.deprecate_with_doc',
'disp': 'numpy.lib.function_base.disp',
'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose',
'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
'get_include': 'numpy.lib.utils.get_include',
'recfromcsv': 'numpy.lib.npyio.recfromcsv',
'recfromtxt': 'numpy.lib.npyio.recfromtxt',
'safe_eval': 'numpy.lib.utils.safe_eval',
'set_string_function': 'numpy.core.arrayprint.set_string_function',
'show_config': 'numpy.__config__.show',
'who': 'numpy.lib.utils.who',
}
# We override dir to not show these members
allowlist = undocumented
bad_results = check_dir(np)
# pytest gives better error messages with the builtin assert than with
# assert_equal
assert bad_results == allowlist
@pytest.mark.parametrize('name', ['testing', 'Tester'])
def test_import_lazy_import(name):
"""Make sure we can actually use the modules we lazy load.
While not exported as part of the public API, it was accessible. With the
    use of __getattr__ and __dir__, this isn't always true. It can happen that
    an infinite recursion occurs.
This is the only way I found that would force the failure to appear on the
badly implemented code.
    We also test for the presence of the lazily imported modules in dir.
"""
exe = (sys.executable, '-c', "import numpy; numpy." + name)
result = subprocess.check_output(exe)
assert not result
# Make sure they are still in the __dir__
assert name in dir(np)
def test_dir_testing():
"""Assert that output of dir has only one "testing/tester"
attribute without duplicate"""
assert len(dir(np)) == len(set(dir(np)))
def test_numpy_linalg():
bad_results = check_dir(np.linalg)
assert bad_results == {}
def test_numpy_fft():
bad_results = check_dir(np.fft)
assert bad_results == {}
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
def test_NPY_NO_EXPORT():
cdll = ctypes.CDLL(np.core._multiarray_tests.__file__)
# Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
f = getattr(cdll, 'test_not_exported', None)
assert f is None, ("'test_not_exported' is mistakenly exported, "
"NPY_NO_EXPORT does not work")
# Historically NumPy has not used leading underscores for private submodules
# much. This has resulted in lots of things that look like public modules
# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
# but were never intended to be public. The PUBLIC_MODULES list contains
# modules that are either public because they were meant to be, or because they
# contain public functions/objects that aren't present in any other namespace
# for whatever reason and therefore should be treated as public.
#
# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
# of underscores) but should not be used. For many of those modules the
# current status is fine. For others it may make sense to work on making them
# private, to clean up our public API and avoid confusion.
PUBLIC_MODULES = ['numpy.' + s for s in [
"array_api",
"array_api.linalg",
"ctypeslib",
"distutils",
"distutils.cpuinfo",
"distutils.exec_command",
"distutils.misc_util",
"distutils.log",
"distutils.system_info",
"doc",
"doc.constants",
"doc.ufuncs",
"f2py",
"fft",
"lib",
"lib.format", # was this meant to be public?
"lib.mixins",
"lib.recfunctions",
"lib.scimath",
"lib.stride_tricks",
"linalg",
"ma",
"ma.extras",
"ma.mrecords",
"matlib",
"polynomial",
"polynomial.chebyshev",
"polynomial.hermite",
"polynomial.hermite_e",
"polynomial.laguerre",
"polynomial.legendre",
"polynomial.polynomial",
"random",
"testing",
"typing",
"typing.mypy_plugin",
"version",
]]
PUBLIC_ALIASED_MODULES = [
"numpy.char",
"numpy.emath",
"numpy.rec",
]
PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
"compat",
"compat.py3k",
"conftest",
"core",
"core.arrayprint",
"core.defchararray",
"core.einsumfunc",
"core.fromnumeric",
"core.function_base",
"core.getlimits",
"core.memmap",
"core.multiarray",
"core.numeric",
"core.numerictypes",
"core.overrides",
"core.records",
"core.shape_base",
"core.umath",
"core.umath_tests",
"distutils.armccompiler",
"distutils.ccompiler",
'distutils.ccompiler_opt',
"distutils.command",
"distutils.command.autodist",
"distutils.command.bdist_rpm",
"distutils.command.build",
"distutils.command.build_clib",
"distutils.command.build_ext",
"distutils.command.build_py",
"distutils.command.build_scripts",
"distutils.command.build_src",
"distutils.command.config",
"distutils.command.config_compiler",
"distutils.command.develop",
"distutils.command.egg_info",
"distutils.command.install",
"distutils.command.install_clib",
"distutils.command.install_data",
"distutils.command.install_headers",
"distutils.command.sdist",
"distutils.conv_template",
"distutils.core",
"distutils.extension",
"distutils.fcompiler",
"distutils.fcompiler.absoft",
"distutils.fcompiler.arm",
"distutils.fcompiler.compaq",
"distutils.fcompiler.environment",
"distutils.fcompiler.g95",
"distutils.fcompiler.gnu",
"distutils.fcompiler.hpux",
"distutils.fcompiler.ibm",
"distutils.fcompiler.intel",
"distutils.fcompiler.lahey",
"distutils.fcompiler.mips",
"distutils.fcompiler.nag",
"distutils.fcompiler.none",
"distutils.fcompiler.pathf95",
"distutils.fcompiler.pg",
"distutils.fcompiler.nv",
"distutils.fcompiler.sun",
"distutils.fcompiler.vast",
"distutils.fcompiler.fujitsu",
"distutils.from_template",
"distutils.intelccompiler",
"distutils.lib2def",
"distutils.line_endings",
"distutils.mingw32ccompiler",
"distutils.msvccompiler",
"distutils.npy_pkg_config",
"distutils.numpy_distribution",
"distutils.pathccompiler",
"distutils.unixccompiler",
"dual",
"f2py.auxfuncs",
"f2py.capi_maps",
"f2py.cb_rules",
"f2py.cfuncs",
"f2py.common_rules",
"f2py.crackfortran",
"f2py.diagnose",
"f2py.f2py2e",
"f2py.f90mod_rules",
"f2py.func2subr",
"f2py.rules",
"f2py.symbolic",
"f2py.use_rules",
"fft.helper",
"lib.arraypad",
"lib.arraysetops",
"lib.arrayterator",
"lib.function_base",
"lib.histograms",
"lib.index_tricks",
"lib.nanfunctions",
"lib.npyio",
"lib.polynomial",
"lib.shape_base",
"lib.twodim_base",
"lib.type_check",
"lib.ufunclike",
"lib.user_array", # note: not in np.lib, but probably should just be deleted
"lib.utils",
"linalg.lapack_lite",
"linalg.linalg",
"ma.bench",
"ma.core",
"ma.testutils",
"ma.timer_comparison",
"matrixlib",
"matrixlib.defmatrix",
"polynomial.polyutils",
"random.mtrand",
"random.bit_generator",
"testing.print_coercion_tables",
"testing.utils",
]]
def is_unexpected(name):
"""Check if this needs to be considered."""
if '._' in name or '.tests' in name or '.setup' in name:
return False
if name in PUBLIC_MODULES:
return False
if name in PUBLIC_ALIASED_MODULES:
return False
if name in PRIVATE_BUT_PRESENT_MODULES:
return False
return True
# These are present in a directory with an __init__.py but cannot be imported
# code_generators/ isn't installed, but present for an inplace build
SKIP_LIST = [
"numpy.core.code_generators",
"numpy.core.code_generators.genapi",
"numpy.core.code_generators.generate_umath",
"numpy.core.code_generators.ufunc_docstrings",
"numpy.core.code_generators.generate_numpy_api",
"numpy.core.code_generators.generate_ufunc_api",
"numpy.core.code_generators.numpy_api",
"numpy.core.code_generators.generate_umath_doc",
"numpy.core.cversions",
"numpy.core.generate_numpy_api",
"numpy.distutils.msvc9compiler",
]
def test_all_modules_are_expected():
"""
Test that we don't add anything that looks like a new public module by
accident. Check is based on filenames.
"""
modnames = []
for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__,
prefix=np.__name__ + '.',
onerror=None):
if is_unexpected(modname) and modname not in SKIP_LIST:
# We have a name that is new. If that's on purpose, add it to
# PUBLIC_MODULES. We don't expect to have to add anything to
# PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
modnames.append(modname)
if modnames:
raise AssertionError(f'Found unexpected modules: {modnames}')
# Stuff that clearly shouldn't be in the API and is detected by the next test
# below
SKIP_LIST_2 = [
'numpy.math',
'numpy.distutils.log.sys',
'numpy.doc.constants.re',
'numpy.doc.constants.textwrap',
'numpy.lib.emath',
'numpy.lib.math',
'numpy.matlib.char',
'numpy.matlib.rec',
'numpy.matlib.emath',
'numpy.matlib.math',
'numpy.matlib.linalg',
'numpy.matlib.fft',
'numpy.matlib.random',
'numpy.matlib.ctypeslib',
'numpy.matlib.ma',
]
def test_all_modules_are_expected_2():
"""
Method checking all objects. The pkgutil-based method in
`test_all_modules_are_expected` does not catch imports into a namespace,
    only filenames. So this test is more thorough, and checks imports like:
import .lib.scimath as emath
To check if something in a module is (effectively) public, one can check if
there's anything in that namespace that's a public function/object but is
not exposed in a higher-level namespace. For example for a `numpy.lib`
submodule::
mod = np.lib.mixins
for obj in mod.__all__:
if obj in np.__all__:
continue
elif obj in np.lib.__all__:
continue
else:
print(obj)
"""
def find_unexpected_members(mod_name):
members = []
module = importlib.import_module(mod_name)
if hasattr(module, '__all__'):
objnames = module.__all__
else:
objnames = dir(module)
for objname in objnames:
if not objname.startswith('_'):
fullobjname = mod_name + '.' + objname
if isinstance(getattr(module, objname), types.ModuleType):
if is_unexpected(fullobjname):
if fullobjname not in SKIP_LIST_2:
members.append(fullobjname)
return members
unexpected_members = find_unexpected_members("numpy")
for modname in PUBLIC_MODULES:
unexpected_members.extend(find_unexpected_members(modname))
if unexpected_members:
raise AssertionError("Found unexpected object(s) that look like "
"modules: {}".format(unexpected_members))
def test_api_importable():
"""
Check that all submodules listed higher up in this file can be imported
Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
simply need to be removed from the list (deprecation may or may not be
needed - apply common sense).
"""
def check_importable(module_name):
try:
importlib.import_module(module_name)
except (ImportError, AttributeError):
return False
return True
module_names = []
for module_name in PUBLIC_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules in the public API that cannot be "
"imported: {}".format(module_names))
for module_name in PUBLIC_ALIASED_MODULES:
try:
eval(module_name)
except AttributeError:
module_names.append(module_name)
if module_names:
raise AssertionError("Modules in the public API that were not "
"found: {}".format(module_names))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', category=DeprecationWarning)
warnings.filterwarnings('always', category=ImportWarning)
for module_name in PRIVATE_BUT_PRESENT_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules that are not really public but looked "
"public and can not be imported: "
"{}".format(module_names))
@pytest.mark.xfail(
sysconfig.get_config_var("Py_DEBUG") is not None,
reason=(
"NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, "
"which does not expose the `array_api` entry point. "
"See https://github.com/numpy/numpy/pull/19800"
),
)
def test_array_api_entry_point():
"""
Entry point for Array API implementation can be found with importlib and
returns the numpy.array_api namespace.
"""
eps = importlib.metadata.entry_points()
try:
xp_eps = eps.select(group="array_api")
except AttributeError:
# The select interface for entry_points was introduced in py3.10,
# deprecating its dict interface. We fallback to dict keys for finding
# Array API entry points so that running this test in <=3.9 will
# still work - see https://github.com/numpy/numpy/pull/19800.
xp_eps = eps.get("array_api", [])
assert len(xp_eps) > 0, "No entry points for 'array_api' found"
try:
ep = next(ep for ep in xp_eps if ep.name == "numpy")
except StopIteration:
raise AssertionError("'numpy' not in array_api entry points") from None
xp = ep.load()
msg = (
f"numpy entry point value '{ep.value}' "
"does not point to our Array API implementation"
)
assert xp is numpy.array_api, msg
| 15,916 | Python | 30.707171 | 85 | 0.635964 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_ctypeslib.py | import sys
import pytest
import weakref
from pathlib import Path
import numpy as np
from numpy.ctypeslib import ndpointer, load_library, as_array
from numpy.distutils.misc_util import get_shared_lib_extension
from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal
try:
import ctypes
except ImportError:
ctypes = None
else:
cdll = None
test_cdll = None
if hasattr(sys, 'gettotalrefcount'):
try:
cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__)
except OSError:
pass
try:
test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
except OSError:
pass
if cdll is None:
cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
if test_cdll is None:
test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
c_forward_pointer = test_cdll.forward_pointer
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
@pytest.mark.skipif(sys.platform == 'cygwin',
reason="Known to fail on cygwin")
class TestLoadLibrary:
def test_basic(self):
loader_path = np.core._multiarray_umath.__file__
out1 = load_library('_multiarray_umath', loader_path)
out2 = load_library(Path('_multiarray_umath'), loader_path)
out3 = load_library('_multiarray_umath', Path(loader_path))
out4 = load_library(b'_multiarray_umath', loader_path)
assert isinstance(out1, ctypes.CDLL)
assert out1 is out2 is out3 is out4
def test_basic2(self):
# Regression for #801: load_library with a full library name
# (including extension) does not work.
try:
try:
so = get_shared_lib_extension(is_python_ext=True)
# Should succeed
load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__)
except ImportError:
print("No distutils available, skipping test.")
except ImportError as e:
msg = ("ctypes is not available on this python: skipping the test"
" (import error was: %s)" % str(e))
print(msg)
class TestNdpointer:
def test_dtype(self):
dt = np.intc
p = ndpointer(dtype=dt)
assert_(p.from_param(np.array([1], dt)))
dt = '<i4'
p = ndpointer(dtype=dt)
assert_(p.from_param(np.array([1], dt)))
dt = np.dtype('>i4')
p = ndpointer(dtype=dt)
p.from_param(np.array([1], dt))
assert_raises(TypeError, p.from_param,
np.array([1], dt.newbyteorder('swap')))
dtnames = ['x', 'y']
dtformats = [np.intc, np.float64]
dtdescr = {'names': dtnames, 'formats': dtformats}
dt = np.dtype(dtdescr)
p = ndpointer(dtype=dt)
assert_(p.from_param(np.zeros((10,), dt)))
samedt = np.dtype(dtdescr)
p = ndpointer(dtype=samedt)
assert_(p.from_param(np.zeros((10,), dt)))
dt2 = np.dtype(dtdescr, align=True)
if dt.itemsize != dt2.itemsize:
assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
else:
assert_(p.from_param(np.zeros((10,), dt2)))
def test_ndim(self):
p = ndpointer(ndim=0)
assert_(p.from_param(np.array(1)))
assert_raises(TypeError, p.from_param, np.array([1]))
p = ndpointer(ndim=1)
assert_raises(TypeError, p.from_param, np.array(1))
assert_(p.from_param(np.array([1])))
p = ndpointer(ndim=2)
assert_(p.from_param(np.array([[1]])))
def test_shape(self):
p = ndpointer(shape=(1, 2))
assert_(p.from_param(np.array([[1, 2]])))
assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
p = ndpointer(shape=())
assert_(p.from_param(np.array(1)))
def test_flags(self):
x = np.array([[1, 2], [3, 4]], order='F')
p = ndpointer(flags='FORTRAN')
assert_(p.from_param(x))
p = ndpointer(flags='CONTIGUOUS')
assert_raises(TypeError, p.from_param, x)
p = ndpointer(flags=x.flags.num)
assert_(p.from_param(x))
assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
def test_cache(self):
assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
# shapes are normalized
assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
# 1.12 <= v < 1.16 had a bug that made these fail
assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestNdpointerCFunc:
def test_arguments(self):
""" Test that arguments are coerced from arrays """
c_forward_pointer.restype = ctypes.c_void_p
c_forward_pointer.argtypes = (ndpointer(ndim=2),)
c_forward_pointer(np.zeros((2, 3)))
# too many dimensions
assert_raises(
ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
@pytest.mark.parametrize(
'dt', [
float,
np.dtype(dict(
formats=['<i4', '<i4'],
names=['a', 'b'],
offsets=[0, 2],
itemsize=6
))
], ids=[
'float',
'overlapping-fields'
]
)
def test_return(self, dt):
""" Test that return values are coerced to arrays """
arr = np.zeros((2, 3), dt)
ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
c_forward_pointer.restype = ptr_type
c_forward_pointer.argtypes = (ptr_type,)
# check that the arrays are equivalent views on the same data
arr2 = c_forward_pointer(arr)
assert_equal(arr2.dtype, arr.dtype)
assert_equal(arr2.shape, arr.shape)
assert_equal(
arr2.__array_interface__['data'],
arr.__array_interface__['data']
)
def test_vague_return_value(self):
""" Test that vague ndpointer return values do not promote to arrays """
arr = np.zeros((2, 3))
ptr_type = ndpointer(dtype=arr.dtype)
c_forward_pointer.restype = ptr_type
c_forward_pointer.argtypes = (ptr_type,)
ret = c_forward_pointer(arr)
assert_(isinstance(ret, ptr_type))
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestAsArray:
def test_array(self):
from ctypes import c_int
pair_t = c_int * 2
a = as_array(pair_t(1, 2))
assert_equal(a.shape, (2,))
assert_array_equal(a, np.array([1, 2]))
a = as_array((pair_t * 3)(pair_t(1, 2), pair_t(3, 4), pair_t(5, 6)))
assert_equal(a.shape, (3, 2))
assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]]))
def test_pointer(self):
from ctypes import c_int, cast, POINTER
p = cast((c_int * 10)(*range(10)), POINTER(c_int))
a = as_array(p, shape=(10,))
assert_equal(a.shape, (10,))
assert_array_equal(a, np.arange(10))
a = as_array(p, shape=(2, 5))
assert_equal(a.shape, (2, 5))
assert_array_equal(a, np.arange(10).reshape((2, 5)))
# shape argument is required
assert_raises(TypeError, as_array, p)
def test_struct_array_pointer(self):
from ctypes import c_int16, Structure, pointer
class Struct(Structure):
_fields_ = [('a', c_int16)]
Struct3 = 3 * Struct
c_array = (2 * Struct3)(
Struct3(Struct(a=1), Struct(a=2), Struct(a=3)),
Struct3(Struct(a=4), Struct(a=5), Struct(a=6))
)
expected = np.array([
[(1,), (2,), (3,)],
[(4,), (5,), (6,)],
], dtype=[('a', np.int16)])
def check(x):
assert_equal(x.dtype, expected.dtype)
assert_equal(x, expected)
# all of these should be equivalent
check(as_array(c_array))
check(as_array(pointer(c_array), shape=()))
check(as_array(pointer(c_array[0]), shape=(2,)))
check(as_array(pointer(c_array[0][0]), shape=(2, 3)))
def test_reference_cycles(self):
# related to gh-6511
import ctypes
# create array to work with
# don't use int/long to avoid running into bpo-10746
N = 100
a = np.arange(N, dtype=np.short)
# get pointer to array
pnt = np.ctypeslib.as_ctypes(a)
with np.testing.assert_no_gc_cycles():
# decay the array above to a pointer to its first element
newpnt = ctypes.cast(pnt, ctypes.POINTER(ctypes.c_short))
# and construct an array using this data
b = np.ctypeslib.as_array(newpnt, (N,))
# now delete both, which should cleanup both objects
del newpnt, b
def test_segmentation_fault(self):
arr = np.zeros((224, 224, 3))
c_arr = np.ctypeslib.as_ctypes(arr)
arr_ref = weakref.ref(arr)
del arr
# check the reference wasn't cleaned up
assert_(arr_ref() is not None)
# check we avoid the segfault
c_arr[0][0][0]
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestAsCtypesType:
""" Test conversion from dtypes to ctypes types """
def test_scalar(self):
dt = np.dtype('<u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16.__ctype_le__)
dt = np.dtype('>u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16.__ctype_be__)
dt = np.dtype('u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16)
def test_subarray(self):
dt = np.dtype((np.int32, (2, 3)))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, 2 * (3 * ctypes.c_int32))
def test_structure(self):
dt = np.dtype([
('a', np.uint16),
('b', np.uint32),
])
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Structure))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
])
def test_structure_aligned(self):
dt = np.dtype([
('a', np.uint16),
('b', np.uint32),
], align=True)
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Structure))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('', ctypes.c_char * 2), # padding
('b', ctypes.c_uint32),
])
def test_union(self):
dt = np.dtype(dict(
names=['a', 'b'],
offsets=[0, 0],
formats=[np.uint16, np.uint32]
))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Union))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
])
def test_padded_union(self):
dt = np.dtype(dict(
names=['a', 'b'],
offsets=[0, 0],
formats=[np.uint16, np.uint32],
itemsize=5,
))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Union))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
('', ctypes.c_char * 5), # padding
])
def test_overlapping(self):
dt = np.dtype(dict(
names=['a', 'b'],
offsets=[0, 2],
formats=[np.uint32, np.uint32]
))
assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
| 12,290 | Python | 32.308943 | 93 | 0.55297 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test__all__.py |
import collections
import numpy as np
def test_no_duplicates_in_np__all__():
# Regression test for gh-10198.
dups = {k: v for k, v in collections.Counter(np.__all__).items() if v > 1}
assert len(dups) == 0
| 221 | Python | 21.199998 | 78 | 0.638009 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_numpy_version.py | """
Check the numpy version is valid.
Note that a development version is marked by the presence of 'dev0' or '+'
in the version string, all else is treated as a release. The version string
itself is set from the output of ``git describe`` which relies on tags.
Examples
--------
Valid Development: 1.22.0.dev0, 1.22.0.dev0+5.g7999db4df2, 1.22.0+5.g7999db4df2
Valid Release: 1.21.0rc1, 1.21.0b1, 1.21.0
Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a
Note that a release is determined by the version string, which in turn
is controlled by the result of the ``git describe`` command.
"""
import re
import numpy as np
from numpy.testing import assert_
def test_valid_numpy_version():
# Verify that the numpy version is a valid one (no .post suffix or other
# nonsense). See gh-6431 for an issue caused by an invalid version.
version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9]|)"
dev_suffix = r"(\.dev0|)(\+[0-9]*\.g[0-9a-f]+|)"
if np.version.release:
res = re.match(version_pattern + '$', np.__version__)
else:
res = re.match(version_pattern + dev_suffix + '$', np.__version__)
assert_(res is not None, np.__version__)
def test_short_version():
# Check numpy.short_version actually exists
if np.version.release:
assert_(np.__version__ == np.version.short_version,
"short_version mismatch in release version")
else:
assert_(np.__version__.split("+")[0] == np.version.short_version,
"short_version mismatch in development version")
| 1,575 | Python | 34.022221 | 77 | 0.653333 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_warnings.py | """
Tests which scan for certain occurrences in the code; they may not find
all of these occurrences but should catch almost all.
"""
import pytest
from pathlib import Path
import ast
import tokenize
import numpy
class ParseCall(ast.NodeVisitor):
def __init__(self):
self.ls = []
def visit_Attribute(self, node):
ast.NodeVisitor.generic_visit(self, node)
self.ls.append(node.attr)
def visit_Name(self, node):
self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
def __init__(self, filename):
super().__init__()
self.__filename = filename
def visit_Call(self, node):
p = ParseCall()
p.visit(node.func)
ast.NodeVisitor.generic_visit(self, node)
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
if node.args[0].s == "ignore":
raise AssertionError(
"warnings should have an appropriate stacklevel; found in "
"{} on line {}".format(self.__filename, node.lineno))
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
if "testing/tests/test_warnings.py" == self.__filename:
# This file
return
# See if stacklevel exists:
if len(node.args) == 3:
return
args = {kw.arg for kw in node.keywords}
if "stacklevel" in args:
return
raise AssertionError(
"warnings should have an appropriate stacklevel; found in "
"{} on line {}".format(self.__filename, node.lineno))
@pytest.mark.slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(numpy.__file__).parent
for path in base.rglob("*.py"):
if base / "testing" in path.parents:
continue
if path == base / "__init__.py":
continue
if path == base / "random" / "__init__.py":
continue
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read())
FindFuncs(path).visit(tree)
| 2,280 | Python | 29.413333 | 79 | 0.556579 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_utility_functions.py | from __future__ import annotations
from ._array_object import Array
from typing import Optional, Tuple, Union
import numpy as np
def all(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.all <numpy.all>`.
See its docstring for more information.
"""
return Array._new(np.asarray(np.all(x._array, axis=axis, keepdims=keepdims)))
def any(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.any <numpy.any>`.
See its docstring for more information.
"""
return Array._new(np.asarray(np.any(x._array, axis=axis, keepdims=keepdims)))
| 824 | Python | 20.710526 | 81 | 0.618932 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_sorting_functions.py | from __future__ import annotations
from ._array_object import Array
import numpy as np
# Note: the descending keyword argument is new in this function
def argsort(
x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.argsort <numpy.argsort>`.
See its docstring for more information.
"""
# Note: this keyword argument is different, and the default is different.
kind = "stable" if stable else "quicksort"
if not descending:
res = np.argsort(x._array, axis=axis, kind=kind)
else:
# As NumPy has no native descending sort, we imitate it here. Note that
# simply flipping the results of np.argsort(x._array, ...) would not
# respect the relative order like it would in native descending sorts.
res = np.flip(
np.argsort(np.flip(x._array, axis=axis), axis=axis, kind=kind),
axis=axis,
)
# Rely on flip()/argsort() to validate axis
normalised_axis = axis if axis >= 0 else x.ndim + axis
max_i = x.shape[normalised_axis] - 1
res = max_i - res
return Array._new(res)
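# Worked example for the descending branch above (illustrative only): for
# x = [1, 3, 2], flip(x) = [2, 3, 1] and its stable ascending argsort is
# [2, 0, 1] -- indices *into the flipped array*. Flipping that result gives
# [1, 0, 2], and mapping i -> max_i - i (max_i = 2) yields [1, 2, 0], which
# indexes the original x as [3, 2, 1]. Ties keep their original relative
# order, unlike simply reversing an ascending argsort.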
# Note: the descending keyword argument is new in this function
def sort(
x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.sort <numpy.sort>`.
See its docstring for more information.
"""
# Note: this keyword argument is different, and the default is different.
kind = "stable" if stable else "quicksort"
res = np.sort(x._array, axis=axis, kind=kind)
if descending:
res = np.flip(res, axis=axis)
return Array._new(res)
| 1,754 | Python | 34.099999 | 81 | 0.63626 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_creation_functions.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
if TYPE_CHECKING:
from ._typing import (
Array,
Device,
Dtype,
NestedSequence,
SupportsBufferProtocol,
)
from collections.abc import Sequence
from ._dtypes import _all_dtypes
import numpy as np
def _check_valid_dtype(dtype):
# Note: Only spelling dtypes as the dtype objects is supported.
# We use this instead of "dtype in _all_dtypes" because the dtype objects
# define equality with the sorts of things we want to disallow.
for d in (None,) + _all_dtypes:
if dtype is d:
return
raise ValueError("dtype must be one of the supported dtypes")
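# Illustrative sketch of why identity (`is`) is used above: NumPy dtype
# objects compare equal to many spellings that the array API disallows, e.g.
#
# >>> np.dtype("float64") == "float64"  # `==` is permissive
# True
#
# so a membership test based on `==` would accept strings and other aliases,
# while `dtype is d` only accepts the dtype objects from `_all_dtypes`.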
def asarray(
obj: Union[
Array,
bool,
int,
float,
NestedSequence[bool | int | float],
SupportsBufferProtocol,
],
/,
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
copy: Optional[Union[bool, np._CopyMode]] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.
See its docstring for more information.
"""
# _array_object imports in this file are inside the functions to avoid
# circular imports
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
if copy in (False, np._CopyMode.IF_NEEDED):
# Note: copy=False is not yet implemented in np.asarray
raise NotImplementedError("copy=False is not yet implemented")
if isinstance(obj, Array):
if dtype is not None and obj.dtype != dtype:
copy = True
if copy in (True, np._CopyMode.ALWAYS):
return Array._new(np.array(obj._array, copy=True, dtype=dtype))
return obj
if dtype is None and isinstance(obj, int) and (obj > 2 ** 64 or obj < -(2 ** 63)):
# Give a better error message in this case. NumPy would convert this
# to an object array. TODO: This won't handle large integers in lists.
raise OverflowError("Integer out of bounds for array dtypes")
res = np.asarray(obj, dtype=dtype)
return Array._new(res)
def arange(
start: Union[int, float],
/,
stop: Optional[Union[int, float]] = None,
step: Union[int, float] = 1,
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.arange <numpy.arange>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.arange(start, stop=stop, step=step, dtype=dtype))
def empty(
shape: Union[int, Tuple[int, ...]],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.empty <numpy.empty>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.empty(shape, dtype=dtype))
def empty_like(
x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.empty_like <numpy.empty_like>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.empty_like(x._array, dtype=dtype))
def eye(
n_rows: int,
n_cols: Optional[int] = None,
/,
*,
k: int = 0,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.eye <numpy.eye>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.eye(n_rows, M=n_cols, k=k, dtype=dtype))
def from_dlpack(x: object, /) -> Array:
from ._array_object import Array
return Array._new(np.from_dlpack(x))
def full(
shape: Union[int, Tuple[int, ...]],
fill_value: Union[int, float],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.full <numpy.full>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
if isinstance(fill_value, Array) and fill_value.ndim == 0:
fill_value = fill_value._array
res = np.full(shape, fill_value, dtype=dtype)
if res.dtype not in _all_dtypes:
# This will happen if the fill value is not something that NumPy
# coerces to one of the acceptable dtypes.
raise TypeError("Invalid input to full")
return Array._new(res)
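# Illustrative sketch of the dtype check above (assumed expected output, not
# a verbatim session): fill values that NumPy would coerce to a dtype outside
# the spec, such as a Python string, are rejected.
#
#     >>> import numpy.array_api as xp
#     >>> xp.full((2, 2), 3.0).dtype
#     dtype('float64')
#     >>> xp.full((2, 2), "x")
#     Traceback (most recent call last):
#         ...
#     TypeError: Invalid input to full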
def full_like(
x: Array,
/,
fill_value: Union[int, float],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.full_like <numpy.full_like>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
res = np.full_like(x._array, fill_value, dtype=dtype)
if res.dtype not in _all_dtypes:
# This will happen if the fill value is not something that NumPy
# coerces to one of the acceptable dtypes.
raise TypeError("Invalid input to full_like")
return Array._new(res)
def linspace(
start: Union[int, float],
stop: Union[int, float],
/,
num: int,
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
endpoint: bool = True,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.linspace <numpy.linspace>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint))
def meshgrid(*arrays: Array, indexing: str = "xy") -> List[Array]:
"""
Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`.
See its docstring for more information.
"""
from ._array_object import Array
# Note: unlike np.meshgrid, only inputs with all the same dtype are
# allowed
if len({a.dtype for a in arrays}) > 1:
raise ValueError("meshgrid inputs must all have the same dtype")
return [
Array._new(array)
for array in np.meshgrid(*[a._array for a in arrays], indexing=indexing)
]
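# Illustrative sketch of the same-dtype restriction above (assumed expected
# output, not a verbatim session): mixed dtypes are rejected up front,
# whereas np.meshgrid would happily combine them.
#
#     >>> import numpy.array_api as xp
#     >>> x = xp.asarray([0.0, 1.0], dtype=xp.float32)
#     >>> y = xp.asarray([0.0, 1.0], dtype=xp.float64)
#     >>> xp.meshgrid(x, y)
#     Traceback (most recent call last):
#         ...
#     ValueError: meshgrid inputs must all have the same dtype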
def ones(
shape: Union[int, Tuple[int, ...]],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.ones <numpy.ones>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.ones(shape, dtype=dtype))
def ones_like(
x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.ones_like <numpy.ones_like>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.ones_like(x._array, dtype=dtype))
def tril(x: Array, /, *, k: int = 0) -> Array:
"""
Array API compatible wrapper for :py:func:`np.tril <numpy.tril>`.
See its docstring for more information.
"""
from ._array_object import Array
if x.ndim < 2:
# Note: Unlike np.tril, x must be at least 2-D
raise ValueError("x must be at least 2-dimensional for tril")
return Array._new(np.tril(x._array, k=k))
def triu(x: Array, /, *, k: int = 0) -> Array:
"""
Array API compatible wrapper for :py:func:`np.triu <numpy.triu>`.
See its docstring for more information.
"""
from ._array_object import Array
if x.ndim < 2:
# Note: Unlike np.triu, x must be at least 2-D
raise ValueError("x must be at least 2-dimensional for triu")
return Array._new(np.triu(x._array, k=k))
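# Illustrative sketch of the 2-D restriction above (assumed expected output,
# not a verbatim session): both tril() and triu() reject 1-D input, whereas
# np.tril/np.triu would first promote it to 2-D.
#
#     >>> import numpy.array_api as xp
#     >>> xp.triu(xp.asarray([1, 2, 3]))
#     Traceback (most recent call last):
#         ...
#     ValueError: x must be at least 2-dimensional for triu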
def zeros(
shape: Union[int, Tuple[int, ...]],
*,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.zeros <numpy.zeros>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.zeros(shape, dtype=dtype))
def zeros_like(
x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.zeros_like <numpy.zeros_like>`.
See its docstring for more information.
"""
from ._array_object import Array
_check_valid_dtype(dtype)
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
return Array._new(np.zeros_like(x._array, dtype=dtype))
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_array_object.py

"""
Wrapper class around the ndarray object for the array API standard.
The array API standard defines some behaviors differently than ndarray, in
particular, type promotion rules are different (the standard has no
value-based casting). The standard also specifies a more limited subset of
array methods and functionalities than are implemented on ndarray. Since the
goal of the array_api namespace is to be a minimal implementation of the array
API standard, we need to define a separate wrapper class for the array_api
namespace.
The standard compliant class is only a wrapper class. It is *not* a subclass
of ndarray.
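
A minimal usage sketch (illustrative only; the session below is an assumed
expected output, not a verbatim transcript):

    >>> import numpy.array_api as xp
    >>> a = xp.asarray([1.0, 2.0], dtype=xp.float64)
    >>> a + 1.0   # operators return a wrapped Array, never a bare ndarray
    Array([2., 3.], dtype=float64)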
"""
from __future__ import annotations
import operator
from enum import IntEnum
from ._creation_functions import asarray
from ._dtypes import (
_all_dtypes,
_boolean_dtypes,
_integer_dtypes,
_integer_or_boolean_dtypes,
_floating_dtypes,
_numeric_dtypes,
_result_type,
_dtype_categories,
)
from typing import TYPE_CHECKING, Optional, Tuple, Union, Any, SupportsIndex
import types
if TYPE_CHECKING:
from ._typing import Any, PyCapsule, Device, Dtype
import numpy.typing as npt
import numpy as np
from numpy import array_api
class Array:
"""
n-d array object for the array API namespace.
See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
information.
This is a wrapper around numpy.ndarray that restricts the usage to only
those things that are required by the array API namespace. Note,
attributes on this object that start with a single underscore are not part
of the API specification and should only be used internally. This object
should not be constructed directly. Rather, use one of the creation
functions, such as asarray().
"""
_array: np.ndarray
# Use a custom constructor instead of __init__, as manually initializing
# this class is not supported API.
@classmethod
def _new(cls, x, /):
"""
This is a private method for initializing the array API Array
object.
Functions outside of the array_api submodule should not use this
method. Use one of the creation functions instead, such as
``asarray``.
"""
obj = super().__new__(cls)
# Note: The spec does not have array scalars, only 0-D arrays.
if isinstance(x, np.generic):
# Convert the array scalar to a 0-D array
x = np.asarray(x)
if x.dtype not in _all_dtypes:
raise TypeError(
f"The array_api namespace does not support the dtype '{x.dtype}'"
)
obj._array = x
return obj
# Prevent Array() from working
def __new__(cls, *args, **kwargs):
raise TypeError(
"The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead."
)
# These functions are not required by the spec, but are implemented for
# the sake of usability.
def __str__(self: Array, /) -> str:
"""
Performs the operation __str__.
"""
return self._array.__str__().replace("array", "Array")
def __repr__(self: Array, /) -> str:
"""
Performs the operation __repr__.
"""
suffix = f", dtype={self.dtype.name})"
if 0 in self.shape:
prefix = "empty("
mid = str(self.shape)
else:
prefix = "Array("
mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)
return prefix + mid + suffix
# This function is not required by the spec, but we implement it here for
# convenience so that np.asarray(np.array_api.Array) will work.
def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:
"""
Warning: this method is NOT part of the array API spec. Implementers
of other libraries need not include it, and users should not assume it
will be present in other implementations.
"""
return np.asarray(self._array, dtype=dtype)
# These are various helper functions to make the array behavior match the
# spec in places where it either deviates from or is more strict than
# NumPy behavior
def _check_allowed_dtypes(self, other: bool | int | float | Array, dtype_category: str, op: str) -> Array:
"""
Helper function for operators to only allow specific input dtypes
Use like
other = self._check_allowed_dtypes(other, 'numeric', '__add__')
if other is NotImplemented:
return other
"""
if self.dtype not in _dtype_categories[dtype_category]:
raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
if isinstance(other, (int, float, bool)):
other = self._promote_scalar(other)
elif isinstance(other, Array):
if other.dtype not in _dtype_categories[dtype_category]:
raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
else:
return NotImplemented
# This will raise TypeError for type combinations that are not allowed
# to promote in the spec (even if the NumPy array operator would
# promote them).
res_dtype = _result_type(self.dtype, other.dtype)
if op.startswith("__i"):
# Note: NumPy will allow in-place operators in some cases where
# the type promoted operator does not match the left-hand side
# operand. For example,
# >>> a = np.array(1, dtype=np.int8)
# >>> a += np.array(1, dtype=np.int16)
# The spec explicitly disallows this.
if res_dtype != self.dtype:
raise TypeError(
f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}"
)
return other
# Helper function to match the type promotion rules in the spec
def _promote_scalar(self, scalar):
"""
Returns a promoted version of a Python scalar appropriate for use with
operations on self.
This may raise an OverflowError in cases where the scalar is an
integer that is too large to fit in a NumPy integer dtype, or
TypeError when the scalar type is incompatible with the dtype of self.
"""
# Note: Only Python scalar types that match the array dtype are
# allowed.
if isinstance(scalar, bool):
if self.dtype not in _boolean_dtypes:
raise TypeError(
"Python bool scalars can only be promoted with bool arrays"
)
elif isinstance(scalar, int):
if self.dtype in _boolean_dtypes:
raise TypeError(
"Python int scalars cannot be promoted with bool arrays"
)
elif isinstance(scalar, float):
if self.dtype not in _floating_dtypes:
raise TypeError(
"Python float scalars can only be promoted with floating-point arrays."
)
else:
raise TypeError("'scalar' must be a Python scalar")
# Note: scalars are unconditionally cast to the same dtype as the
# array.
# Note: the spec only specifies integer-dtype/int promotion
# behavior for integers within the bounds of the integer dtype.
# Outside of those bounds we use the default NumPy behavior (either
# cast or raise OverflowError).
return Array._new(np.array(scalar, self.dtype))
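    # Illustrative sketch of the promotion rules above (assumed expected
    # output, not a verbatim session): scalars are cast to the array's own
    # dtype, and kind mismatches raise instead of promoting.
    #
    #     >>> import numpy.array_api as xp
    #     >>> a = xp.asarray([1.0], dtype=xp.float32)
    #     >>> (a + 1.0).dtype            # Python float keeps float32
    #     dtype('float32')
    #     >>> xp.asarray([True]) & 1     # int scalar with a bool array
    #     Traceback (most recent call last):
    #         ...
    #     TypeError: Python int scalars cannot be promoted with bool arrays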
@staticmethod
def _normalize_two_args(x1, x2) -> Tuple[Array, Array]:
"""
Normalize inputs to two arg functions to fix type promotion rules
NumPy deviates from the spec type promotion rules in cases where one
argument is 0-dimensional and the other is not. For example:
>>> import numpy as np
>>> a = np.array([1.0], dtype=np.float32)
>>> b = np.array(1.0, dtype=np.float64)
>>> np.add(a, b) # The spec says this should be float64
array([2.], dtype=float32)
To fix this, we add a dimension to the 0-dimension array before passing it
through. This works because a dimension would be added anyway from
broadcasting, so the resulting shape is the same, but this prevents NumPy
from not promoting the dtype.
"""
# Another option would be to use signature=(x1.dtype, x2.dtype, None),
# but that only works for ufuncs, so we would have to call the ufuncs
# directly in the operator methods. One should also note that this
# sort of trick wouldn't work for functions like searchsorted, which
# don't do normal broadcasting, but there aren't any functions like
# that in the array API namespace.
if x1.ndim == 0 and x2.ndim != 0:
# The _array[None] workaround was chosen because it is relatively
# performant. broadcast_to(x1._array, x2.shape) is much slower. We
# could also manually type promote x2, but that is more complicated
# and about the same performance as this.
x1 = Array._new(x1._array[None])
elif x2.ndim == 0 and x1.ndim != 0:
x2 = Array._new(x2._array[None])
return (x1, x2)
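    # Illustrative sketch of the normalization above (assumed expected
    # output, not a verbatim session): a 0-D/1-D mix now promotes per the
    # spec rather than per NumPy's value-based rules.
    #
    #     >>> import numpy.array_api as xp
    #     >>> a = xp.asarray([1.0], dtype=xp.float32)
    #     >>> b = xp.asarray(1.0, dtype=xp.float64)
    #     >>> (a + b).dtype
    #     dtype('float64')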
# Note: A large fraction of allowed indices are disallowed here (see the
# docstring below)
def _validate_index(self, key):
"""
Validate an index according to the array API.
The array API specification only requires a subset of indices that are
supported by NumPy. This function will reject any index that is
allowed by NumPy but not required by the array API specification. We
always raise ``IndexError`` on such indices (the spec does not require
any specific behavior on them, but this makes the NumPy array API
namespace a minimal implementation of the spec). See
https://data-apis.org/array-api/latest/API_specification/indexing.html
for the full list of required indexing behavior
        This function raises ``IndexError`` if the index ``key`` is invalid.
        It only raises ``IndexError`` on indices that are not already
        rejected by NumPy, as NumPy will already raise the appropriate error
        on such indices.
The following cases are allowed by NumPy, but not specified by the array
API specification:
        - Indices must not rely on an implicit ellipsis at the end. That is,
          every axis of an array must be explicitly indexed, or an ellipsis
          must be included. The laxer NumPy behaviour is sometimes referred
          to as flat indexing.
- The start and stop of a slice may not be out of bounds. In
particular, for a slice ``i:j:k`` on an axis of size ``n``, only the
following are allowed:
- ``i`` or ``j`` omitted (``None``).
- ``-n <= i <= max(0, n - 1)``.
- For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
- For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
- Boolean array indices are not allowed as part of a larger tuple
index.
- Integer array indices are not allowed (with the exception of 0-D
arrays, which are treated the same as scalars).
Additionally, it should be noted that indices that would return a
scalar in NumPy will return a 0-D array. Array scalars are not allowed
in the specification, only 0-D arrays. This is done in the
``Array._new`` constructor, not this function.
"""
_key = key if isinstance(key, tuple) else (key,)
for i in _key:
if isinstance(i, bool) or not (
isinstance(i, SupportsIndex) # i.e. ints
or isinstance(i, slice)
or i == Ellipsis
or i is None
or isinstance(i, Array)
or isinstance(i, np.ndarray)
):
raise IndexError(
f"Single-axes index {i} has {type(i)=}, but only "
"integers, slices (:), ellipsis (...), newaxis (None), "
"zero-dimensional integer arrays and boolean arrays "
"are specified in the Array API."
)
nonexpanding_key = []
single_axes = []
n_ellipsis = 0
key_has_mask = False
for i in _key:
if i is not None:
nonexpanding_key.append(i)
if isinstance(i, Array) or isinstance(i, np.ndarray):
if i.dtype in _boolean_dtypes:
key_has_mask = True
single_axes.append(i)
else:
# i must not be an array here, to avoid elementwise equals
if i == Ellipsis:
n_ellipsis += 1
else:
single_axes.append(i)
n_single_axes = len(single_axes)
if n_ellipsis > 1:
return # handled by ndarray
elif n_ellipsis == 0:
# Note boolean masks must be the sole index, which we check for
# later on.
if not key_has_mask and n_single_axes < self.ndim:
raise IndexError(
f"{self.ndim=}, but the multi-axes index only specifies "
f"{n_single_axes} dimensions. If this was intentional, "
"add a trailing ellipsis (...) which expands into as many "
"slices (:) as necessary - this is what np.ndarray arrays "
"implicitly do, but such flat indexing behaviour is not "
"specified in the Array API."
)
if n_ellipsis == 0:
indexed_shape = self.shape
else:
ellipsis_start = None
for pos, i in enumerate(nonexpanding_key):
if not (isinstance(i, Array) or isinstance(i, np.ndarray)):
if i == Ellipsis:
ellipsis_start = pos
break
assert ellipsis_start is not None # sanity check
ellipsis_end = self.ndim - (n_single_axes - ellipsis_start)
indexed_shape = (
self.shape[:ellipsis_start] + self.shape[ellipsis_end:]
)
for i, side in zip(single_axes, indexed_shape):
if isinstance(i, slice):
if side == 0:
f_range = "0 (or None)"
else:
f_range = f"between -{side} and {side - 1} (or None)"
if i.start is not None:
try:
start = operator.index(i.start)
except TypeError:
pass # handled by ndarray
else:
if not (-side <= start <= side):
raise IndexError(
f"Slice {i} contains {start=}, but should be "
f"{f_range} for an axis of size {side} "
"(out-of-bounds starts are not specified in "
"the Array API)"
)
if i.stop is not None:
try:
stop = operator.index(i.stop)
except TypeError:
pass # handled by ndarray
else:
if not (-side <= stop <= side):
raise IndexError(
f"Slice {i} contains {stop=}, but should be "
f"{f_range} for an axis of size {side} "
"(out-of-bounds stops are not specified in "
"the Array API)"
)
elif isinstance(i, Array):
if i.dtype in _boolean_dtypes and len(_key) != 1:
assert isinstance(key, tuple) # sanity check
raise IndexError(
f"Single-axes index {i} is a boolean array and "
f"{len(key)=}, but masking is only specified in the "
"Array API when the array is the sole index."
)
elif i.dtype in _integer_dtypes and i.ndim != 0:
raise IndexError(
f"Single-axes index {i} is a non-zero-dimensional "
"integer array, but advanced integer indexing is not "
"specified in the Array API."
)
elif isinstance(i, tuple):
raise IndexError(
f"Single-axes index {i} is a tuple, but nested tuple "
"indices are not specified in the Array API."
)
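    # Illustrative sketch of the validation above (assumed expected output,
    # not a verbatim session): indices NumPy accepts but the spec does not
    # are rejected with IndexError.
    #
    #     >>> import numpy.array_api as xp
    #     >>> a = xp.ones((2, 3))
    #     >>> a[0]          # implicit trailing ellipsis: rejected
    #     Traceback (most recent call last):
    #         ...
    #     IndexError: ...
    #     >>> a[0, :]       # every axis explicitly indexed: allowed
    #     Array([1., 1., 1.], dtype=float64)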
# Everything below this line is required by the spec.
def __abs__(self: Array, /) -> Array:
"""
Performs the operation __abs__.
"""
if self.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in __abs__")
res = self._array.__abs__()
return self.__class__._new(res)
def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __add__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__add__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__add__(other._array)
return self.__class__._new(res)
def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __and__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__and__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__and__(other._array)
return self.__class__._new(res)
def __array_namespace__(
self: Array, /, *, api_version: Optional[str] = None
) -> types.ModuleType:
if api_version is not None and not api_version.startswith("2021."):
raise ValueError(f"Unrecognized array API version: {api_version!r}")
return array_api
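    # Illustrative sketch of the protocol above (assumed expected output,
    # not a verbatim session): library code can recover the namespace from
    # any Array and stay array-library agnostic.
    #
    #     >>> import numpy.array_api as xp
    #     >>> a = xp.asarray([1.0])
    #     >>> ns = a.__array_namespace__()
    #     >>> ns is xp
    #     True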
def __bool__(self: Array, /) -> bool:
"""
Performs the operation __bool__.
"""
# Note: This is an error here.
if self._array.ndim != 0:
raise TypeError("bool is only allowed on arrays with 0 dimensions")
if self.dtype not in _boolean_dtypes:
raise ValueError("bool is only allowed on boolean arrays")
res = self._array.__bool__()
return res
def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:
"""
Performs the operation __dlpack__.
"""
return self._array.__dlpack__(stream=stream)
def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
"""
Performs the operation __dlpack_device__.
"""
# Note: device support is required for this
return self._array.__dlpack_device__()
def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
"""
Performs the operation __eq__.
"""
# Even though "all" dtypes are allowed, we still require them to be
# promotable with each other.
other = self._check_allowed_dtypes(other, "all", "__eq__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__eq__(other._array)
return self.__class__._new(res)
def __float__(self: Array, /) -> float:
"""
Performs the operation __float__.
"""
# Note: This is an error here.
if self._array.ndim != 0:
raise TypeError("float is only allowed on arrays with 0 dimensions")
if self.dtype not in _floating_dtypes:
raise ValueError("float is only allowed on floating-point arrays")
res = self._array.__float__()
return res
def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __floordiv__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__floordiv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__floordiv__(other._array)
return self.__class__._new(res)
def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __ge__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__ge__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__ge__(other._array)
return self.__class__._new(res)
def __getitem__(
self: Array,
key: Union[
int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
],
/,
) -> Array:
"""
Performs the operation __getitem__.
"""
# Note: Only indices required by the spec are allowed. See the
# docstring of _validate_index
self._validate_index(key)
if isinstance(key, Array):
# Indexing self._array with array_api arrays can be erroneous
key = key._array
res = self._array.__getitem__(key)
return self._new(res)
def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __gt__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__gt__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__gt__(other._array)
return self.__class__._new(res)
def __int__(self: Array, /) -> int:
"""
Performs the operation __int__.
"""
# Note: This is an error here.
if self._array.ndim != 0:
raise TypeError("int is only allowed on arrays with 0 dimensions")
if self.dtype not in _integer_dtypes:
raise ValueError("int is only allowed on integer arrays")
res = self._array.__int__()
return res
def __index__(self: Array, /) -> int:
"""
Performs the operation __index__.
"""
res = self._array.__index__()
return res
def __invert__(self: Array, /) -> Array:
"""
Performs the operation __invert__.
"""
if self.dtype not in _integer_or_boolean_dtypes:
raise TypeError("Only integer or boolean dtypes are allowed in __invert__")
res = self._array.__invert__()
return self.__class__._new(res)
def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __le__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__le__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__le__(other._array)
return self.__class__._new(res)
def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __lshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__lshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__lshift__(other._array)
return self.__class__._new(res)
def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __lt__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__lt__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__lt__(other._array)
return self.__class__._new(res)
def __matmul__(self: Array, other: Array, /) -> Array:
"""
Performs the operation __matmul__.
"""
# matmul is not defined for scalars, but without this, we may get
# the wrong error message from asarray.
other = self._check_allowed_dtypes(other, "numeric", "__matmul__")
if other is NotImplemented:
return other
res = self._array.__matmul__(other._array)
return self.__class__._new(res)
def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __mod__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__mod__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__mod__(other._array)
return self.__class__._new(res)
def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __mul__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__mul__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__mul__(other._array)
return self.__class__._new(res)
def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
"""
Performs the operation __ne__.
"""
other = self._check_allowed_dtypes(other, "all", "__ne__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__ne__(other._array)
return self.__class__._new(res)
def __neg__(self: Array, /) -> Array:
"""
Performs the operation __neg__.
"""
if self.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in __neg__")
res = self._array.__neg__()
return self.__class__._new(res)
def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __or__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__or__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__or__(other._array)
return self.__class__._new(res)
def __pos__(self: Array, /) -> Array:
"""
Performs the operation __pos__.
"""
if self.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in __pos__")
res = self._array.__pos__()
return self.__class__._new(res)
def __pow__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __pow__.
"""
from ._elementwise_functions import pow
other = self._check_allowed_dtypes(other, "numeric", "__pow__")
if other is NotImplemented:
return other
# Note: NumPy's __pow__ does not follow type promotion rules for 0-d
# arrays, so we use pow() here instead.
return pow(self, other)
def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __rshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__rshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rshift__(other._array)
return self.__class__._new(res)
def __setitem__(
self,
key: Union[
int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
],
value: Union[int, float, bool, Array],
/,
) -> None:
"""
Performs the operation __setitem__.
"""
# Note: Only indices required by the spec are allowed. See the
# docstring of _validate_index
self._validate_index(key)
if isinstance(key, Array):
# Indexing self._array with array_api arrays can be erroneous
key = key._array
self._array.__setitem__(key, asarray(value)._array)
def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __sub__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__sub__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__sub__(other._array)
return self.__class__._new(res)
# PEP 484 requires int to be a subtype of float, but __truediv__ should
# not accept int.
def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __truediv__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__truediv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__truediv__(other._array)
return self.__class__._new(res)
def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __xor__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__xor__(other._array)
return self.__class__._new(res)
def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __iadd__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__iadd__")
if other is NotImplemented:
return other
self._array.__iadd__(other._array)
return self
def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __radd__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__radd__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__radd__(other._array)
return self.__class__._new(res)
def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __iand__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__")
if other is NotImplemented:
return other
self._array.__iand__(other._array)
return self
def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __rand__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rand__(other._array)
return self.__class__._new(res)
def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __ifloordiv__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__ifloordiv__")
if other is NotImplemented:
return other
self._array.__ifloordiv__(other._array)
return self
def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rfloordiv__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rfloordiv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rfloordiv__(other._array)
return self.__class__._new(res)
def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __ilshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__ilshift__")
if other is NotImplemented:
return other
self._array.__ilshift__(other._array)
return self
def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __rlshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__rlshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rlshift__(other._array)
return self.__class__._new(res)
def __imatmul__(self: Array, other: Array, /) -> Array:
"""
Performs the operation __imatmul__.
"""
# Note: NumPy does not implement __imatmul__.
# matmul is not defined for scalars, but without this, we may get
# the wrong error message from asarray.
other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
if other is NotImplemented:
return other
# __imatmul__ can only be allowed when it would not change the shape
# of self.
other_shape = other.shape
if self.shape == () or other_shape == ():
raise ValueError("@= requires at least one dimension")
if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]:
raise ValueError("@= cannot change the shape of the input array")
self._array[:] = self._array.__matmul__(other._array)
return self
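    # Illustrative sketch of the shape check above (assumed expected output,
    # not a verbatim session): in-place matmul only accepts a square
    # right-hand side, so the shape of self never changes.
    #
    #     >>> import numpy.array_api as xp
    #     >>> a = xp.ones((2, 3))
    #     >>> a @= xp.ones((3, 3))   # OK: shape stays (2, 3)
    #     >>> a @= xp.ones((3, 2))   # result would be (2, 2): rejected
    #     Traceback (most recent call last):
    #         ...
    #     ValueError: @= cannot change the shape of the input array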
def __rmatmul__(self: Array, other: Array, /) -> Array:
"""
Performs the operation __rmatmul__.
"""
# matmul is not defined for scalars, but without this, we may get
# the wrong error message from asarray.
other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__")
if other is NotImplemented:
return other
res = self._array.__rmatmul__(other._array)
return self.__class__._new(res)
def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __imod__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__imod__")
if other is NotImplemented:
return other
self._array.__imod__(other._array)
return self
def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rmod__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rmod__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rmod__(other._array)
return self.__class__._new(res)
def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __imul__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__imul__")
if other is NotImplemented:
return other
self._array.__imul__(other._array)
return self
def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rmul__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rmul__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rmul__(other._array)
return self.__class__._new(res)
def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __ior__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__")
if other is NotImplemented:
return other
self._array.__ior__(other._array)
return self
def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __ror__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__ror__(other._array)
return self.__class__._new(res)
def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __ipow__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__ipow__")
if other is NotImplemented:
return other
self._array.__ipow__(other._array)
return self
def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rpow__.
"""
from ._elementwise_functions import pow
other = self._check_allowed_dtypes(other, "numeric", "__rpow__")
if other is NotImplemented:
return other
# Note: NumPy's __pow__ does not follow the spec type promotion rules
# for 0-d arrays, so we use pow() here instead.
return pow(other, self)
def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __irshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__irshift__")
if other is NotImplemented:
return other
self._array.__irshift__(other._array)
return self
def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __rrshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__rrshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rrshift__(other._array)
return self.__class__._new(res)
def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __isub__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__isub__")
if other is NotImplemented:
return other
self._array.__isub__(other._array)
return self
def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rsub__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rsub__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rsub__(other._array)
return self.__class__._new(res)
def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __itruediv__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__")
if other is NotImplemented:
return other
self._array.__itruediv__(other._array)
return self
def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __rtruediv__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rtruediv__(other._array)
return self.__class__._new(res)
def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __ixor__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__")
if other is NotImplemented:
return other
self._array.__ixor__(other._array)
return self
def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __rxor__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rxor__(other._array)
return self.__class__._new(res)
def to_device(self: Array, device: Device, /, stream: None = None) -> Array:
if stream is not None:
raise ValueError("The stream argument to to_device() is not supported")
if device == 'cpu':
return self
raise ValueError(f"Unsupported device {device!r}")
@property
def dtype(self) -> Dtype:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`.
See its docstring for more information.
"""
return self._array.dtype
@property
def device(self) -> Device:
return "cpu"
# Note: mT is new in array API spec (see matrix_transpose)
@property
def mT(self) -> Array:
from .linalg import matrix_transpose
return matrix_transpose(self)
@property
def ndim(self) -> int:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`.
See its docstring for more information.
"""
return self._array.ndim
@property
def shape(self) -> Tuple[int, ...]:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`.
See its docstring for more information.
"""
return self._array.shape
@property
def size(self) -> int:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`.
See its docstring for more information.
"""
return self._array.size
@property
def T(self) -> Array:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`.
See its docstring for more information.
"""
# Note: T only works on 2-dimensional arrays. See the corresponding
# note in the specification:
# https://data-apis.org/array-api/latest/API_specification/array_object.html#t
if self.ndim != 2:
raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.")
return self.__class__._new(self._array.T)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/linalg.py

from __future__ import annotations
from ._dtypes import _floating_dtypes, _numeric_dtypes
from ._array_object import Array
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ._typing import Literal, Optional, Sequence, Tuple, Union
from typing import NamedTuple
import numpy.linalg
import numpy as np
class EighResult(NamedTuple):
eigenvalues: Array
eigenvectors: Array
class QRResult(NamedTuple):
Q: Array
R: Array
class SlogdetResult(NamedTuple):
sign: Array
logabsdet: Array
class SVDResult(NamedTuple):
U: Array
S: Array
Vh: Array
# Note: the inclusion of the upper keyword is different from
# np.linalg.cholesky, which does not have it.
def cholesky(x: Array, /, *, upper: bool = False) -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.cholesky <numpy.linalg.cholesky>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.cholesky.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in cholesky')
L = np.linalg.cholesky(x._array)
if upper:
return Array._new(L).mT
return Array._new(L)
# Note: cross is in the numpy top-level namespace, not in np.linalg
def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
"""
Array API compatible wrapper for :py:func:`np.cross <numpy.cross>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError('Only numeric dtypes are allowed in cross')
# Note: this is different from np.cross(), which broadcasts
if x1.shape != x2.shape:
raise ValueError('x1 and x2 must have the same shape')
if x1.ndim == 0:
raise ValueError('cross() requires arrays of dimension at least 1')
# Note: this is different from np.cross(), which allows dimension 2
if x1.shape[axis] != 3:
raise ValueError('cross() dimension must equal 3')
return Array._new(np.cross(x1._array, x2._array, axis=axis))
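# Illustrative sketch of the dimension check above (assumed expected output,
# not a verbatim session): unlike np.cross, 2-element vectors are rejected;
# the axis being reduced must have length exactly 3.
#
#     >>> import numpy.array_api as xp
#     >>> from numpy.array_api import linalg
#     >>> linalg.cross(xp.asarray([1.0, 0.0]), xp.asarray([0.0, 1.0]))
#     Traceback (most recent call last):
#         ...
#     ValueError: cross() dimension must equal 3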
def det(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.det <numpy.linalg.det>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.det.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in det')
return Array._new(np.linalg.det(x._array))
# Note: diagonal is in the numpy top-level namespace, not in np.linalg
def diagonal(x: Array, /, *, offset: int = 0) -> Array:
"""
Array API compatible wrapper for :py:func:`np.diagonal <numpy.diagonal>`.
See its docstring for more information.
"""
# Note: diagonal always operates on the last two axes, whereas np.diagonal
# operates on the first two axes by default
return Array._new(np.diagonal(x._array, offset=offset, axis1=-2, axis2=-1))
def eigh(x: Array, /) -> EighResult:
"""
Array API compatible wrapper for :py:func:`np.linalg.eigh <numpy.linalg.eigh>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.eigh.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in eigh')
# Note: the return type here is a namedtuple, which is different from
    # np.linalg.eigh, which only returns a tuple.
return EighResult(*map(Array._new, np.linalg.eigh(x._array)))
def eigvalsh(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.eigvalsh <numpy.linalg.eigvalsh>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.eigvalsh.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in eigvalsh')
return Array._new(np.linalg.eigvalsh(x._array))
def inv(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.inv <numpy.linalg.inv>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.inv.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in inv')
return Array._new(np.linalg.inv(x._array))
# Note: matmul is in the numpy top-level namespace but not in np.linalg
def matmul(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.matmul <numpy.matmul>`.
See its docstring for more information.
"""
# Note: the restriction to numeric dtypes only is different from
# np.matmul.
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError('Only numeric dtypes are allowed in matmul')
return Array._new(np.matmul(x1._array, x2._array))
# Note: the name here is different from norm(). The array API norm is split
# into matrix_norm() and vector_norm().
# The type for ord should be Optional[Union[int, float, Literal[np.inf,
# -np.inf, 'fro', 'nuc']]], but Literal does not support floating-point
# literals.
def matrix_norm(x: Array, /, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.norm <numpy.linalg.norm>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.norm.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in matrix_norm')
return Array._new(np.linalg.norm(x._array, axis=(-2, -1), keepdims=keepdims, ord=ord))
def matrix_power(x: Array, n: int, /) -> Array:
"""
    Array API compatible wrapper for :py:func:`np.linalg.matrix_power <numpy.linalg.matrix_power>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.matrix_power.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed for the first argument of matrix_power')
# np.matrix_power already checks if n is an integer
return Array._new(np.linalg.matrix_power(x._array, n))
# Note: the keyword argument name rtol is different from np.linalg.matrix_rank
def matrix_rank(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array:
"""
    Array API compatible wrapper for :py:func:`np.linalg.matrix_rank <numpy.linalg.matrix_rank>`.
See its docstring for more information.
"""
    # Note: this is different from np.linalg.matrix_rank, which supports
    # 1-dimensional arrays.
if x.ndim < 2:
raise np.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional")
S = np.linalg.svd(x._array, compute_uv=False)
if rtol is None:
tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * np.finfo(S.dtype).eps
else:
if isinstance(rtol, Array):
rtol = rtol._array
# Note: this is different from np.linalg.matrix_rank, which does not multiply
# the tolerance by the largest singular value.
tol = S.max(axis=-1, keepdims=True)*np.asarray(rtol)[..., np.newaxis]
return Array._new(np.count_nonzero(S > tol, axis=-1))
# Note: this function is new in the array API spec. Unlike transpose, it only
# transposes the last two axes.
def matrix_transpose(x: Array, /) -> Array:
if x.ndim < 2:
raise ValueError("x must be at least 2-dimensional for matrix_transpose")
return Array._new(np.swapaxes(x._array, -1, -2))
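# Illustrative sketch of the axis behavior above (assumed expected output,
# not a verbatim session): matrix_transpose (also exposed as the Array.mT
# property) swaps only the last two axes, so a stack of matrices is
# transposed matrix-by-matrix.
#
#     >>> import numpy.array_api as xp
#     >>> xp.ones((4, 2, 3)).mT.shape
#     (4, 3, 2)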
# Note: outer is in the numpy top-level namespace, not in np.linalg
def outer(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.outer <numpy.outer>`.
See its docstring for more information.
"""
# Note: the restriction to numeric dtypes only is different from
# np.outer.
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError('Only numeric dtypes are allowed in outer')
# Note: the restriction to only 1-dim arrays is different from np.outer
if x1.ndim != 1 or x2.ndim != 1:
raise ValueError('The input arrays to outer must be 1-dimensional')
return Array._new(np.outer(x1._array, x2._array))
# Note: the keyword argument name rtol is different from np.linalg.pinv
def pinv(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.pinv <numpy.linalg.pinv>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.pinv.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in pinv')
# Note: this is different from np.linalg.pinv, which does not multiply the
# default tolerance by max(M, N).
if rtol is None:
rtol = max(x.shape[-2:]) * np.finfo(x.dtype).eps
return Array._new(np.linalg.pinv(x._array, rcond=rtol))
def qr(x: Array, /, *, mode: Literal['reduced', 'complete'] = 'reduced') -> QRResult:
"""
Array API compatible wrapper for :py:func:`np.linalg.qr <numpy.linalg.qr>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.qr.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in qr')
# Note: the return type here is a namedtuple, which is different from
# np.linalg.qr, which only returns a tuple.
return QRResult(*map(Array._new, np.linalg.qr(x._array, mode=mode)))
def slogdet(x: Array, /) -> SlogdetResult:
"""
Array API compatible wrapper for :py:func:`np.linalg.slogdet <numpy.linalg.slogdet>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.slogdet.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in slogdet')
# Note: the return type here is a namedtuple, which is different from
# np.linalg.slogdet, which only returns a tuple.
return SlogdetResult(*map(Array._new, np.linalg.slogdet(x._array)))
# Note: unlike np.linalg.solve, the array API solve() only accepts x2 as a
# vector when it is exactly 1-dimensional. All other cases treat x2 as a stack
# of matrices. The np.linalg.solve behavior of allowing stacks of both
# matrices and vectors is ambiguous c.f.
# https://github.com/numpy/numpy/issues/15349 and
# https://github.com/data-apis/array-api/issues/285.
# To workaround this, the below is the code from np.linalg.solve except
# only calling solve1 in the exactly 1D case.
def _solve(a, b):
from ..linalg.linalg import (_makearray, _assert_stacked_2d,
_assert_stacked_square, _commonType,
isComplexType, get_linalg_error_extobj,
_raise_linalgerror_singular)
from ..linalg import _umath_linalg
a, _ = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# This part is different from np.linalg.solve
if b.ndim == 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
# This does nothing currently but is left in because it will be relevant
# when complex dtype support is added to the spec in 2022.
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def solve(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.solve <numpy.linalg.solve>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.solve.
if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in solve')
return Array._new(_solve(x1._array, x2._array))
def svd(x: Array, /, *, full_matrices: bool = True) -> SVDResult:
"""
Array API compatible wrapper for :py:func:`np.linalg.svd <numpy.linalg.svd>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.svd.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in svd')
# Note: the return type here is a namedtuple, which is different from
# np.svd, which only returns a tuple.
return SVDResult(*map(Array._new, np.linalg.svd(x._array, full_matrices=full_matrices)))
# Note: svdvals is not in NumPy (but it is in SciPy). It is equivalent to
# np.linalg.svd(compute_uv=False).
def svdvals(x: Array, /) -> Union[Array, Tuple[Array, ...]]:
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in svdvals')
return Array._new(np.linalg.svd(x._array, compute_uv=False))
# Note: tensordot is in the numpy top-level namespace but not in np.linalg
# Note: axes must be a tuple, unlike np.tensordot where it can be an array or array-like.
def tensordot(x1: Array, x2: Array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2) -> Array:
# Note: the restriction to numeric dtypes only is different from
# np.tensordot.
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError('Only numeric dtypes are allowed in tensordot')
return Array._new(np.tensordot(x1._array, x2._array, axes=axes))
# Note: trace is in the numpy top-level namespace, not in np.linalg
def trace(x: Array, /, *, offset: int = 0) -> Array:
"""
Array API compatible wrapper for :py:func:`np.trace <numpy.trace>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError('Only numeric dtypes are allowed in trace')
# Note: trace always operates on the last two axes, whereas np.trace
# operates on the first two axes by default
return Array._new(np.asarray(np.trace(x._array, offset=offset, axis1=-2, axis2=-1)))
# Note: vecdot is not in NumPy
def vecdot(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError('Only numeric dtypes are allowed in vecdot')
ndim = max(x1.ndim, x2.ndim)
x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape)
x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape)
if x1_shape[axis] != x2_shape[axis]:
raise ValueError("x1 and x2 must have the same size along the given axis")
x1_, x2_ = np.broadcast_arrays(x1._array, x2._array)
x1_ = np.moveaxis(x1_, axis, -1)
x2_ = np.moveaxis(x2_, axis, -1)
res = x1_[..., None, :] @ x2_[..., None]
return Array._new(res[..., 0, 0])
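# Illustrative sketch of the matmul trick above (assumed expected output,
# not a verbatim session): it computes the same thing as summing the
# elementwise product over the chosen axis.
#
#     >>> import numpy.array_api as xp
#     >>> from numpy.array_api import linalg
#     >>> x = xp.asarray([1.0, 2.0, 3.0])
#     >>> linalg.vecdot(x, x)     # same value as xp.sum(x * x)
#     Array(14., dtype=float64)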
# Note: the name here is different from norm(). The array API norm is split
# into matrix_norm() and vector_norm().
# The type for ord should be Optional[Union[int, float, Literal[np.inf,
# -np.inf]]] but Literal does not support floating-point literals.
def vector_norm(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> Array:
"""
Array API compatible wrapper for :py:func:`np.linalg.norm <numpy.linalg.norm>`.
See its docstring for more information.
"""
# Note: the restriction to floating-point dtypes only is different from
# np.linalg.norm.
if x.dtype not in _floating_dtypes:
raise TypeError('Only floating-point dtypes are allowed in norm')
a = x._array
if axis is None:
a = a.flatten()
axis = 0
elif isinstance(axis, tuple):
# Note: The axis argument supports any number of axes, whereas norm()
# only supports a single axis for vector norm.
rest = tuple(i for i in range(a.ndim) if i not in axis)
newshape = axis + rest
a = np.transpose(a, newshape).reshape((np.prod([a.shape[i] for i in axis]), *[a.shape[i] for i in rest]))
axis = 0
return Array._new(np.linalg.norm(a, axis=axis, keepdims=keepdims, ord=ord))
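# Illustrative sketch of the axis=None branch above (assumed expected
# output, not a verbatim session): the input is flattened first, so the
# result is always the norm of a single vector.
#
#     >>> import numpy.array_api as xp
#     >>> from numpy.array_api import linalg
#     >>> linalg.vector_norm(xp.asarray([[3.0, 0.0], [0.0, 4.0]]))
#     Array(5., dtype=float64)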
__all__ = ['cholesky', 'cross', 'det', 'diagonal', 'eigh', 'eigvalsh', 'inv', 'matmul', 'matrix_norm', 'matrix_power', 'matrix_rank', 'matrix_transpose', 'outer', 'pinv', 'qr', 'slogdet', 'solve', 'svd', 'svdvals', 'tensordot', 'trace', 'vecdot', 'vector_norm']
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/__init__.py

"""
A NumPy sub-namespace that conforms to the Python array API standard.
This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
is still considered experimental, and will issue a warning when imported.
This is a proof-of-concept namespace that wraps the corresponding NumPy
functions to give a conforming implementation of the Python array API standard
(https://data-apis.github.io/array-api/latest/). The standard is currently in
an RFC phase and comments on it are both welcome and encouraged. Comments
should be made either at https://github.com/data-apis/array-api or at
https://github.com/data-apis/consortium-feedback/discussions.
NumPy already follows the proposed spec for the most part, so this module
serves mostly as a thin wrapper around it. However, NumPy also implements a
lot of behavior that is not included in the spec, so this serves as a
restricted subset of the API. Only those functions that are part of the spec
are included in this namespace, and all functions are given with the exact
signature given in the spec, including the use of position-only arguments, and
omitting any extra keyword arguments implemented by NumPy but not part of the
spec. The behavior of some functions is also modified from the NumPy behavior
to conform to the standard. Note that the underlying array object itself is
wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
is implemented in pure Python with no C extensions.
The array API spec is designed as a "minimal API subset" and explicitly allows
libraries to include behaviors not specified by it. But users of this module
that intend to write portable code should be aware that only those behaviors
that are listed in the spec are guaranteed to be implemented across libraries.
Consequently, the NumPy implementation was chosen to be both conforming and
minimal, so that users can use this implementation of the array API namespace
and be sure that behaviors that it defines will be available in conforming
namespaces from other libraries.
A few notes about the current state of this submodule:
- There is a test suite that tests modules against the array API standard at
https://github.com/data-apis/array-api-tests. The test suite is still a work
in progress, but the existing tests pass on this module, with a few
exceptions:
- DLPack support (see https://github.com/data-apis/array-api/pull/106) is
not included here, as it requires a full implementation in NumPy proper
first.
The test suite is not yet complete, and even the tests that exist are not
guaranteed to give comprehensive coverage of the spec. Therefore, when
reviewing and using this submodule, you should refer to the standard
documents themselves. There are some tests in numpy.array_api.tests, but
they primarily focus on things that are not tested by the official array API
test suite.
- There is a custom array object, numpy.array_api.Array, which is returned by
all functions in this module. All functions in the array API namespace
implicitly assume that they will only receive this object as input. The only
way to create instances of this object is to use one of the array creation
functions. It does not have a public constructor on the object itself. The
object is a small wrapper class around numpy.ndarray. The main purpose of it
is to restrict the namespace of the array object to only those dtypes and
only those methods that are required by the spec, as well as to limit/change
certain behavior that differs in the spec. In particular:
- The array API namespace does not have scalar objects, only 0-D arrays.
Operations on Array that would create a scalar in NumPy create a 0-D
array.
- Indexing: Only a subset of indices supported by NumPy are required by the
spec. The Array object restricts indexing to only allow those types of
indices that are required by the spec. See the docstring of the
numpy.array_api.Array._validate_indices helper function for more
information.
- Type promotion: Some type promotion rules are different in the spec. In
particular, the spec does not have any value-based casting. The spec also
does not require cross-kind casting, like integer -> floating-point. Only
those promotions that are explicitly required by the array API
specification are allowed in this module. See NEP 47 for more info.
- Functions do not automatically call asarray() on their input, and will not
work if the input type is not Array. The exception is array creation
functions, and Python operators on the Array object, which accept Python
scalars of the same type as the array dtype.
- All functions include type annotations, corresponding to those given in the
spec (see _typing.py for definitions of some custom types). These do not
currently fully pass mypy due to some limitations in mypy.
- Dtype objects are just the NumPy dtype objects, e.g., float64 =
np.dtype('float64'). The spec does not require any behavior on these dtype
objects other than that they be accessible by name and be comparable by
equality, but it was considered too much extra complexity to create custom
objects to represent dtypes.
- All places where the implementations in this submodule are known to deviate
from their corresponding functions in NumPy are marked with "# Note:"
comments.
Still TODO in this module are:
- DLPack support for numpy.ndarray is still in progress. See
https://github.com/numpy/numpy/pull/19083.
- The copy=False keyword argument to asarray() is not yet implemented. This
requires support in numpy.asarray() first.
- Some functions are not yet fully tested in the array API test suite, and may
require updates that are not yet known until the tests are written.
- The spec is still in an RFC phase and may still have minor updates, which
will need to be reflected here.
- Complex number support in array API spec is planned but not yet finalized,
as are the fft extension and certain linear algebra functions such as eig
that require complex dtypes.
"""
import warnings
warnings.warn(
"The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
)
__array_api_version__ = "2021.12"
__all__ = ["__array_api_version__"]
from ._constants import e, inf, nan, pi
__all__ += ["e", "inf", "nan", "pi"]
from ._creation_functions import (
asarray,
arange,
empty,
empty_like,
eye,
from_dlpack,
full,
full_like,
linspace,
meshgrid,
ones,
ones_like,
tril,
triu,
zeros,
zeros_like,
)
__all__ += [
"asarray",
"arange",
"empty",
"empty_like",
"eye",
"from_dlpack",
"full",
"full_like",
"linspace",
"meshgrid",
"ones",
"ones_like",
"tril",
"triu",
"zeros",
"zeros_like",
]
from ._data_type_functions import (
astype,
broadcast_arrays,
broadcast_to,
can_cast,
finfo,
iinfo,
result_type,
)
__all__ += [
"astype",
"broadcast_arrays",
"broadcast_to",
"can_cast",
"finfo",
"iinfo",
"result_type",
]
from ._dtypes import (
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
bool,
)
__all__ += [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float32",
"float64",
"bool",
]
from ._elementwise_functions import (
abs,
acos,
acosh,
add,
asin,
asinh,
atan,
atan2,
atanh,
bitwise_and,
bitwise_left_shift,
bitwise_invert,
bitwise_or,
bitwise_right_shift,
bitwise_xor,
ceil,
cos,
cosh,
divide,
equal,
exp,
expm1,
floor,
floor_divide,
greater,
greater_equal,
isfinite,
isinf,
isnan,
less,
less_equal,
log,
log1p,
log2,
log10,
logaddexp,
logical_and,
logical_not,
logical_or,
logical_xor,
multiply,
negative,
not_equal,
positive,
pow,
remainder,
round,
sign,
sin,
sinh,
square,
sqrt,
subtract,
tan,
tanh,
trunc,
)
__all__ += [
"abs",
"acos",
"acosh",
"add",
"asin",
"asinh",
"atan",
"atan2",
"atanh",
"bitwise_and",
"bitwise_left_shift",
"bitwise_invert",
"bitwise_or",
"bitwise_right_shift",
"bitwise_xor",
"ceil",
"cos",
"cosh",
"divide",
"equal",
"exp",
"expm1",
"floor",
"floor_divide",
"greater",
"greater_equal",
"isfinite",
"isinf",
"isnan",
"less",
"less_equal",
"log",
"log1p",
"log2",
"log10",
"logaddexp",
"logical_and",
"logical_not",
"logical_or",
"logical_xor",
"multiply",
"negative",
"not_equal",
"positive",
"pow",
"remainder",
"round",
"sign",
"sin",
"sinh",
"square",
"sqrt",
"subtract",
"tan",
"tanh",
"trunc",
]
# linalg is an extension in the array API spec, which is a sub-namespace. Only
# a subset of functions in it are imported into the top-level namespace.
from . import linalg
__all__ += ["linalg"]
from .linalg import matmul, tensordot, matrix_transpose, vecdot
__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"]
from ._manipulation_functions import (
concat,
expand_dims,
flip,
permute_dims,
reshape,
roll,
squeeze,
stack,
)
__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"]
from ._searching_functions import argmax, argmin, nonzero, where
__all__ += ["argmax", "argmin", "nonzero", "where"]
from ._set_functions import unique_all, unique_counts, unique_inverse, unique_values
__all__ += ["unique_all", "unique_counts", "unique_inverse", "unique_values"]
from ._sorting_functions import argsort, sort
__all__ += ["argsort", "sort"]
from ._statistical_functions import max, mean, min, prod, std, sum, var
__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
from ._utility_functions import all, any
__all__ += ["all", "any"]
| 10,221 | Python | 26.042328 | 99 | 0.681832 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_data_type_functions.py | from __future__ import annotations
from ._array_object import Array
from ._dtypes import _all_dtypes, _result_type
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Tuple, Union
if TYPE_CHECKING:
from ._typing import Dtype
from collections.abc import Sequence
import numpy as np
# Note: astype is a function, not an array method as in NumPy.
def astype(x: Array, dtype: Dtype, /, *, copy: bool = True) -> Array:
if not copy and dtype == x.dtype:
return x
return Array._new(x._array.astype(dtype=dtype, copy=copy))
def broadcast_arrays(*arrays: Array) -> List[Array]:
"""
Array API compatible wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`.
See its docstring for more information.
"""
from ._array_object import Array
return [
Array._new(array) for array in np.broadcast_arrays(*[a._array for a in arrays])
]
def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
"""
Array API compatible wrapper for :py:func:`np.broadcast_to <numpy.broadcast_to>`.
See its docstring for more information.
"""
from ._array_object import Array
return Array._new(np.broadcast_to(x._array, shape))
def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
"""
Array API compatible wrapper for :py:func:`np.can_cast <numpy.can_cast>`.
See its docstring for more information.
"""
if isinstance(from_, Array):
from_ = from_.dtype
elif from_ not in _all_dtypes:
raise TypeError(f"{from_=}, but should be an array_api array or dtype")
if to not in _all_dtypes:
raise TypeError(f"{to=}, but should be a dtype")
# Note: We avoid np.can_cast() as it has discrepancies with the array API,
# since NumPy allows cross-kind casting (e.g., NumPy allows bool -> int8).
# See https://github.com/numpy/numpy/issues/20870
try:
# We promote `from_` and `to` together. We then check if the promoted
# dtype is `to`, which indicates if `from_` can (up)cast to `to`.
dtype = _result_type(from_, to)
return to == dtype
except TypeError:
# _result_type() raises if the dtypes don't promote together
return False
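# A minimal sketch (not part of the upstream module) contrasting this spec
# definition of can_cast with NumPy's: cross-kind casts such as int8 ->
# float64 are "safe" in NumPy but rejected here. The _demo_* name is
# hypothetical.
def _demo_can_cast():
    import numpy as np
    import numpy.array_api as xp
    assert np.can_cast(np.int8, np.float64)      # NumPy allows cross-kind
    assert not xp.can_cast(xp.int8, xp.float64)  # the spec version does not
    assert xp.can_cast(xp.int8, xp.int16)        # same-kind upcast is fine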
# These are internal objects for the return types of finfo and iinfo, since
# the NumPy versions contain extra data that isn't part of the spec.
@dataclass
class finfo_object:
bits: int
# Note: The types of the float data here are float, whereas in NumPy they
# are scalars of the corresponding float dtype.
eps: float
max: float
min: float
smallest_normal: float
@dataclass
class iinfo_object:
bits: int
max: int
min: int
def finfo(type: Union[Dtype, Array], /) -> finfo_object:
"""
Array API compatible wrapper for :py:func:`np.finfo <numpy.finfo>`.
See its docstring for more information.
"""
fi = np.finfo(type)
# Note: The types of the float data here are float, whereas in NumPy they
# are scalars of the corresponding float dtype.
return finfo_object(
fi.bits,
float(fi.eps),
float(fi.max),
float(fi.min),
float(fi.smallest_normal),
)
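# A minimal sketch (not part of the upstream module): unlike np.finfo, the
# fields here are plain Python floats rather than NumPy scalars of the
# corresponding dtype. The _demo_* name is hypothetical.
def _demo_finfo_fields():
    import numpy as np
    import numpy.array_api as xp
    fi = xp.finfo(xp.float32)
    assert type(fi.eps) is float                         # plain float here
    assert type(np.finfo(np.float32).eps) is np.float32  # NumPy scalar
    return fi.bits, fi.max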
def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:
"""
Array API compatible wrapper for :py:func:`np.iinfo <numpy.iinfo>`.
See its docstring for more information.
"""
ii = np.iinfo(type)
return iinfo_object(ii.bits, ii.max, ii.min)
def result_type(*arrays_and_dtypes: Union[Array, Dtype]) -> Dtype:
"""
Array API compatible wrapper for :py:func:`np.result_type <numpy.result_type>`.
See its docstring for more information.
"""
# Note: we use a custom implementation that gives only the type promotions
# required by the spec rather than using np.result_type. NumPy implements
# too many extra type promotions like int64 + uint64 -> float64, and does
# value-based casting on scalar arrays.
A = []
for a in arrays_and_dtypes:
if isinstance(a, Array):
a = a.dtype
elif isinstance(a, np.ndarray) or a not in _all_dtypes:
raise TypeError("result_type() inputs must be array_api arrays or dtypes")
A.append(a)
if len(A) == 0:
raise ValueError("at least one array or dtype is required")
elif len(A) == 1:
return A[0]
else:
t = A[0]
for t2 in A[1:]:
t = _result_type(t, t2)
return t
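# A minimal sketch (not part of the upstream module) of the pairwise
# reduction above: result_type folds _result_type over its arguments, so any
# pairing missing from the promotion table raises instead of widening. The
# _demo_* name is hypothetical.
def _demo_result_type():
    import numpy.array_api as xp
    assert xp.result_type(xp.int8, xp.int16, xp.int32) == xp.int32
    try:
        xp.result_type(xp.int64, xp.float32)  # cross-kind: not in the table
    except TypeError:
        return True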
| 4,480 | Python | 29.482993 | 93 | 0.641741 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_searching_functions.py | from __future__ import annotations
from ._array_object import Array
from ._dtypes import _result_type
from typing import Optional, Tuple
import numpy as np
def argmax(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
"""
Array API compatible wrapper for :py:func:`np.argmax <numpy.argmax>`.
See its docstring for more information.
"""
return Array._new(np.asarray(np.argmax(x._array, axis=axis, keepdims=keepdims)))
def argmin(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
"""
Array API compatible wrapper for :py:func:`np.argmin <numpy.argmin>`.
See its docstring for more information.
"""
return Array._new(np.asarray(np.argmin(x._array, axis=axis, keepdims=keepdims)))
def nonzero(x: Array, /) -> Tuple[Array, ...]:
"""
Array API compatible wrapper for :py:func:`np.nonzero <numpy.nonzero>`.
See its docstring for more information.
"""
return tuple(Array._new(i) for i in np.nonzero(x._array))
def where(condition: Array, x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.where <numpy.where>`.
See its docstring for more information.
"""
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.where(condition._array, x1._array, x2._array))
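# A minimal sketch (not part of the upstream module): where() checks type
# promotion of x1 and x2 up front, so mismatched kinds raise before NumPy is
# ever called. The _demo_* name is hypothetical.
def _demo_where():
    import numpy.array_api as xp
    cond = xp.asarray([True, False])
    out = xp.where(cond, xp.asarray([1, 2]), xp.asarray([3, 4]))
    assert int(out[1]) == 4  # second element taken from x2
    return out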
| 1,457 | Python | 29.374999 | 88 | 0.664379 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_constants.py | import numpy as np
e = np.e
inf = np.inf
nan = np.nan
pi = np.pi
| 66 | Python | 8.571427 | 18 | 0.621212 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_typing.py | """
This file defines the types for type annotations.
These names aren't part of the module namespace, but they are used in the
annotations in the function signatures. The functions in the module are only
valid for inputs that match the given type annotations.
"""
from __future__ import annotations
__all__ = [
"Array",
"Device",
"Dtype",
"SupportsDLPack",
"SupportsBufferProtocol",
"PyCapsule",
]
import sys
from typing import (
Any,
Literal,
Sequence,
Type,
Union,
TYPE_CHECKING,
TypeVar,
Protocol,
)
from ._array_object import Array
from numpy import (
dtype,
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
)
_T_co = TypeVar("_T_co", covariant=True)
class NestedSequence(Protocol[_T_co]):
def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
def __len__(self, /) -> int: ...
Device = Literal["cpu"]
if TYPE_CHECKING or sys.version_info >= (3, 9):
Dtype = dtype[Union[
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
]]
else:
Dtype = dtype
SupportsBufferProtocol = Any
PyCapsule = Any
class SupportsDLPack(Protocol):
def __dlpack__(self, /, *, stream: None = ...) -> PyCapsule: ...
| 1,376 | Python | 17.36 | 76 | 0.595203 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_statistical_functions.py | from __future__ import annotations
from ._dtypes import (
_floating_dtypes,
_numeric_dtypes,
)
from ._array_object import Array
from ._creation_functions import asarray
from ._dtypes import float32, float64
from typing import TYPE_CHECKING, Optional, Tuple, Union
if TYPE_CHECKING:
from ._typing import Dtype
import numpy as np
def max(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
) -> Array:
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in max")
return Array._new(np.max(x._array, axis=axis, keepdims=keepdims))
def mean(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
) -> Array:
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in mean")
return Array._new(np.mean(x._array, axis=axis, keepdims=keepdims))
def min(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
) -> Array:
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in min")
return Array._new(np.min(x._array, axis=axis, keepdims=keepdims))
def prod(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
dtype: Optional[Dtype] = None,
keepdims: bool = False,
) -> Array:
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in prod")
# Note: sum() and prod() always upcast float32 to float64 for dtype=None
# We need to do so here before computing the product to avoid overflow
if dtype is None and x.dtype == float32:
dtype = float64
return Array._new(np.prod(x._array, dtype=dtype, axis=axis, keepdims=keepdims))
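# A minimal sketch (not part of the upstream module) of the upcast noted
# above: with dtype=None, a float32 input is accumulated in float64, while an
# explicit dtype is honored. The _demo_* name is hypothetical.
def _demo_prod_upcast():
    import numpy.array_api as xp
    x = xp.ones((3,), dtype=xp.float32)
    assert xp.prod(x).dtype == xp.float64                  # dtype=None upcasts
    assert xp.prod(x, dtype=xp.float32).dtype == xp.float32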
def std(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
) -> Array:
# Note: the keyword argument correction is different here
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in std")
return Array._new(np.std(x._array, axis=axis, ddof=correction, keepdims=keepdims))
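# A minimal sketch (not part of the upstream module) of the keyword
# difference noted above: the spec's correction maps directly onto NumPy's
# ddof. The _demo_* name is hypothetical.
def _demo_std_correction():
    import numpy as np
    import numpy.array_api as xp
    x = xp.asarray([1.0, 2.0, 3.0, 4.0])
    sample_std = xp.std(x, correction=1)  # Bessel's correction
    assert float(sample_std) == np.std(np.asarray([1.0, 2.0, 3.0, 4.0]), ddof=1)
    return sample_std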
def sum(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
dtype: Optional[Dtype] = None,
keepdims: bool = False,
) -> Array:
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in sum")
# Note: sum() and prod() always upcast integers to (u)int64 and float32 to
# float64 for dtype=None. `np.sum` does that too for integers, but not for
# float32, so we need to special-case it here
if dtype is None and x.dtype == float32:
dtype = float64
return Array._new(np.sum(x._array, axis=axis, dtype=dtype, keepdims=keepdims))
def var(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
) -> Array:
# Note: the keyword argument correction is different here
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in var")
return Array._new(np.var(x._array, axis=axis, ddof=correction, keepdims=keepdims))
| 3,378 | Python | 28.12931 | 86 | 0.64032 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_manipulation_functions.py | from __future__ import annotations
from ._array_object import Array
from ._data_type_functions import result_type
from typing import List, Optional, Tuple, Union
import numpy as np
# Note: the function name is different here
def concat(
arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = 0
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.concatenate <numpy.concatenate>`.
See its docstring for more information.
"""
# Note: Casting rules here are different from the np.concatenate default
    # (no special-casing for scalars with axis=None, no cross-kind casting)
dtype = result_type(*arrays)
arrays = tuple(a._array for a in arrays)
return Array._new(np.concatenate(arrays, axis=axis, dtype=dtype))
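# A minimal sketch (not part of the upstream module) of the casting note
# above: concat computes a spec-conforming result dtype up front, so mixed
# kinds raise instead of promoting. The _demo_* name is hypothetical.
def _demo_concat_dtypes():
    import numpy.array_api as xp
    a = xp.asarray([1, 2], dtype=xp.int32)
    b = xp.asarray([3, 4], dtype=xp.int64)
    assert xp.concat((a, b)).dtype == xp.int64  # same-kind promotion is fine
    try:
        xp.concat((a, xp.asarray([1.0, 2.0])))  # int + float: rejected
    except TypeError:
        return True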
def expand_dims(x: Array, /, *, axis: int) -> Array:
"""
Array API compatible wrapper for :py:func:`np.expand_dims <numpy.expand_dims>`.
See its docstring for more information.
"""
return Array._new(np.expand_dims(x._array, axis))
def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> Array:
"""
Array API compatible wrapper for :py:func:`np.flip <numpy.flip>`.
See its docstring for more information.
"""
return Array._new(np.flip(x._array, axis=axis))
# Note: The function name is different here (see also matrix_transpose).
# Unlike transpose(), the axes argument is required.
def permute_dims(x: Array, /, axes: Tuple[int, ...]) -> Array:
"""
Array API compatible wrapper for :py:func:`np.transpose <numpy.transpose>`.
See its docstring for more information.
"""
return Array._new(np.transpose(x._array, axes))
def reshape(x: Array, /, shape: Tuple[int, ...]) -> Array:
"""
Array API compatible wrapper for :py:func:`np.reshape <numpy.reshape>`.
See its docstring for more information.
"""
return Array._new(np.reshape(x._array, shape))
def roll(
x: Array,
/,
shift: Union[int, Tuple[int, ...]],
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
) -> Array:
"""
Array API compatible wrapper for :py:func:`np.roll <numpy.roll>`.
See its docstring for more information.
"""
return Array._new(np.roll(x._array, shift, axis=axis))
def squeeze(x: Array, /, axis: Union[int, Tuple[int, ...]]) -> Array:
"""
Array API compatible wrapper for :py:func:`np.squeeze <numpy.squeeze>`.
See its docstring for more information.
"""
return Array._new(np.squeeze(x._array, axis=axis))
def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = 0) -> Array:
"""
Array API compatible wrapper for :py:func:`np.stack <numpy.stack>`.
See its docstring for more information.
"""
# Call result type here just to raise on disallowed type combinations
result_type(*arrays)
arrays = tuple(a._array for a in arrays)
return Array._new(np.stack(arrays, axis=axis))
| 2,944 | Python | 29.05102 | 87 | 0.648098 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_set_functions.py | from __future__ import annotations
from ._array_object import Array
from typing import NamedTuple
import numpy as np
# Note: np.unique() is split into four functions in the array API:
# unique_all, unique_counts, unique_inverse, and unique_values (this is done
# to remove polymorphic return types).
# Note: The various unique() functions are supposed to return multiple NaNs.
# This does not match the NumPy behavior, however, this is currently left as a
# TODO in this implementation as this behavior may be reverted in np.unique().
# See https://github.com/numpy/numpy/issues/20326.
# Note: The functions here return a namedtuple (np.unique() returns a normal
# tuple).
class UniqueAllResult(NamedTuple):
values: Array
indices: Array
inverse_indices: Array
counts: Array
class UniqueCountsResult(NamedTuple):
values: Array
counts: Array
class UniqueInverseResult(NamedTuple):
values: Array
inverse_indices: Array
def unique_all(x: Array, /) -> UniqueAllResult:
"""
Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
See its docstring for more information.
"""
values, indices, inverse_indices, counts = np.unique(
x._array,
return_counts=True,
return_index=True,
return_inverse=True,
)
# np.unique() flattens inverse indices, but they need to share x's shape
# See https://github.com/numpy/numpy/issues/20638
inverse_indices = inverse_indices.reshape(x.shape)
return UniqueAllResult(
Array._new(values),
Array._new(indices),
Array._new(inverse_indices),
Array._new(counts),
)
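# A minimal sketch (not part of the upstream module) of the reshape note
# above: np.unique flattens inverse_indices, so they are restored to the
# input's shape before being returned. The _demo_* name is hypothetical.
def _demo_unique_inverse_shape():
    import numpy.array_api as xp
    x = xp.asarray([[1, 2], [2, 3]])
    res = xp.unique_all(x)
    assert res.inverse_indices.shape == x.shape  # matches the 2-D input
    return res.values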
def unique_counts(x: Array, /) -> UniqueCountsResult:
    """
    Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
    See its docstring for more information.
    """
    res = np.unique(
x._array,
return_counts=True,
return_index=False,
return_inverse=False,
)
return UniqueCountsResult(*[Array._new(i) for i in res])
def unique_inverse(x: Array, /) -> UniqueInverseResult:
"""
Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
See its docstring for more information.
"""
values, inverse_indices = np.unique(
x._array,
return_counts=False,
return_index=False,
return_inverse=True,
)
# np.unique() flattens inverse indices, but they need to share x's shape
# See https://github.com/numpy/numpy/issues/20638
inverse_indices = inverse_indices.reshape(x.shape)
return UniqueInverseResult(Array._new(values), Array._new(inverse_indices))
def unique_values(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
See its docstring for more information.
"""
res = np.unique(
x._array,
return_counts=False,
return_index=False,
return_inverse=False,
)
return Array._new(res)
| 2,848 | Python | 26.660194 | 79 | 0.668188 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_elementwise_functions.py | from __future__ import annotations
from ._dtypes import (
_boolean_dtypes,
_floating_dtypes,
_integer_dtypes,
_integer_or_boolean_dtypes,
_numeric_dtypes,
_result_type,
)
from ._array_object import Array
import numpy as np
def abs(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.abs <numpy.abs>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in abs")
return Array._new(np.abs(x._array))
# Note: the function name is different here
def acos(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.arccos <numpy.arccos>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in acos")
return Array._new(np.arccos(x._array))
# Note: the function name is different here
def acosh(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.arccosh <numpy.arccosh>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in acosh")
return Array._new(np.arccosh(x._array))
def add(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.add <numpy.add>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in add")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.add(x1._array, x2._array))
# Note: the function name is different here
def asin(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.arcsin <numpy.arcsin>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in asin")
return Array._new(np.arcsin(x._array))
# Note: the function name is different here
def asinh(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.arcsinh <numpy.arcsinh>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in asinh")
return Array._new(np.arcsinh(x._array))
# Note: the function name is different here
def atan(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.arctan <numpy.arctan>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in atan")
return Array._new(np.arctan(x._array))
# Note: the function name is different here
def atan2(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.arctan2 <numpy.arctan2>`.
See its docstring for more information.
"""
if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in atan2")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.arctan2(x1._array, x2._array))
# Note: the function name is different here
def atanh(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.arctanh <numpy.arctanh>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in atanh")
return Array._new(np.arctanh(x._array))
def bitwise_and(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.bitwise_and <numpy.bitwise_and>`.
See its docstring for more information.
"""
if (
x1.dtype not in _integer_or_boolean_dtypes
or x2.dtype not in _integer_or_boolean_dtypes
):
raise TypeError("Only integer or boolean dtypes are allowed in bitwise_and")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.bitwise_and(x1._array, x2._array))
# Note: the function name is different here
def bitwise_left_shift(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.left_shift <numpy.left_shift>`.
See its docstring for more information.
"""
if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
raise TypeError("Only integer dtypes are allowed in bitwise_left_shift")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
    # Note: bitwise_left_shift is only defined for nonnegative x2.
if np.any(x2._array < 0):
raise ValueError("bitwise_left_shift(x1, x2) is only defined for x2 >= 0")
return Array._new(np.left_shift(x1._array, x2._array))
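# A minimal sketch (not part of the upstream module) of the guard above:
# negative shift amounts raise ValueError here instead of falling through to
# NumPy's unspecified behavior. The _demo_* name is hypothetical.
def _demo_left_shift_guard():
    import numpy.array_api as xp
    x = xp.asarray([1, 2], dtype=xp.int32)
    assert int(xp.bitwise_left_shift(x, xp.asarray(3, dtype=xp.int32))[0]) == 8
    try:
        xp.bitwise_left_shift(x, xp.asarray(-1, dtype=xp.int32))
    except ValueError:
        return True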
# Note: the function name is different here
def bitwise_invert(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.invert <numpy.invert>`.
See its docstring for more information.
"""
if x.dtype not in _integer_or_boolean_dtypes:
raise TypeError("Only integer or boolean dtypes are allowed in bitwise_invert")
return Array._new(np.invert(x._array))
def bitwise_or(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.bitwise_or <numpy.bitwise_or>`.
See its docstring for more information.
"""
if (
x1.dtype not in _integer_or_boolean_dtypes
or x2.dtype not in _integer_or_boolean_dtypes
):
raise TypeError("Only integer or boolean dtypes are allowed in bitwise_or")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.bitwise_or(x1._array, x2._array))
# Note: the function name is different here
def bitwise_right_shift(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.right_shift <numpy.right_shift>`.
See its docstring for more information.
"""
if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
raise TypeError("Only integer dtypes are allowed in bitwise_right_shift")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
    # Note: bitwise_right_shift is only defined for nonnegative x2.
if np.any(x2._array < 0):
raise ValueError("bitwise_right_shift(x1, x2) is only defined for x2 >= 0")
return Array._new(np.right_shift(x1._array, x2._array))
def bitwise_xor(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.bitwise_xor <numpy.bitwise_xor>`.
See its docstring for more information.
"""
if (
x1.dtype not in _integer_or_boolean_dtypes
or x2.dtype not in _integer_or_boolean_dtypes
):
raise TypeError("Only integer or boolean dtypes are allowed in bitwise_xor")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.bitwise_xor(x1._array, x2._array))
def ceil(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.ceil <numpy.ceil>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in ceil")
if x.dtype in _integer_dtypes:
# Note: The return dtype of ceil is the same as the input
return x
return Array._new(np.ceil(x._array))
def cos(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.cos <numpy.cos>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in cos")
return Array._new(np.cos(x._array))
def cosh(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.cosh <numpy.cosh>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in cosh")
return Array._new(np.cosh(x._array))
def divide(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.divide <numpy.divide>`.
See its docstring for more information.
"""
if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in divide")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.divide(x1._array, x2._array))
def equal(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.equal <numpy.equal>`.
See its docstring for more information.
"""
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.equal(x1._array, x2._array))
def exp(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.exp <numpy.exp>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in exp")
return Array._new(np.exp(x._array))
def expm1(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.expm1 <numpy.expm1>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in expm1")
return Array._new(np.expm1(x._array))
def floor(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.floor <numpy.floor>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in floor")
if x.dtype in _integer_dtypes:
# Note: The return dtype of floor is the same as the input
return x
return Array._new(np.floor(x._array))
def floor_divide(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.floor_divide <numpy.floor_divide>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in floor_divide")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.floor_divide(x1._array, x2._array))
def greater(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.greater <numpy.greater>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in greater")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.greater(x1._array, x2._array))
def greater_equal(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.greater_equal <numpy.greater_equal>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in greater_equal")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.greater_equal(x1._array, x2._array))
def isfinite(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.isfinite <numpy.isfinite>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in isfinite")
return Array._new(np.isfinite(x._array))
def isinf(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.isinf <numpy.isinf>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in isinf")
return Array._new(np.isinf(x._array))
def isnan(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.isnan <numpy.isnan>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in isnan")
return Array._new(np.isnan(x._array))
def less(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.less <numpy.less>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in less")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.less(x1._array, x2._array))
def less_equal(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.less_equal <numpy.less_equal>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in less_equal")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.less_equal(x1._array, x2._array))
def log(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.log <numpy.log>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in log")
return Array._new(np.log(x._array))
def log1p(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.log1p <numpy.log1p>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in log1p")
return Array._new(np.log1p(x._array))
def log2(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.log2 <numpy.log2>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in log2")
return Array._new(np.log2(x._array))
def log10(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.log10 <numpy.log10>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in log10")
return Array._new(np.log10(x._array))
def logaddexp(x1: Array, x2: Array) -> Array:
"""
Array API compatible wrapper for :py:func:`np.logaddexp <numpy.logaddexp>`.
See its docstring for more information.
"""
if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in logaddexp")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.logaddexp(x1._array, x2._array))
def logical_and(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.logical_and <numpy.logical_and>`.
See its docstring for more information.
"""
if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
raise TypeError("Only boolean dtypes are allowed in logical_and")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.logical_and(x1._array, x2._array))
def logical_not(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.logical_not <numpy.logical_not>`.
See its docstring for more information.
"""
if x.dtype not in _boolean_dtypes:
raise TypeError("Only boolean dtypes are allowed in logical_not")
return Array._new(np.logical_not(x._array))
def logical_or(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.logical_or <numpy.logical_or>`.
See its docstring for more information.
"""
if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
raise TypeError("Only boolean dtypes are allowed in logical_or")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.logical_or(x1._array, x2._array))
def logical_xor(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.logical_xor <numpy.logical_xor>`.
See its docstring for more information.
"""
if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
raise TypeError("Only boolean dtypes are allowed in logical_xor")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.logical_xor(x1._array, x2._array))
def multiply(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.multiply <numpy.multiply>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in multiply")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.multiply(x1._array, x2._array))
def negative(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.negative <numpy.negative>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in negative")
return Array._new(np.negative(x._array))
def not_equal(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.not_equal <numpy.not_equal>`.
See its docstring for more information.
"""
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.not_equal(x1._array, x2._array))
def positive(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.positive <numpy.positive>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in positive")
return Array._new(np.positive(x._array))
# Note: the function name is different here
def pow(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.power <numpy.power>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in pow")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.power(x1._array, x2._array))
def remainder(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.remainder <numpy.remainder>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in remainder")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.remainder(x1._array, x2._array))
def round(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.round <numpy.round>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in round")
return Array._new(np.round(x._array))
def sign(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.sign <numpy.sign>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in sign")
return Array._new(np.sign(x._array))
def sin(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.sin <numpy.sin>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in sin")
return Array._new(np.sin(x._array))
def sinh(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.sinh <numpy.sinh>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in sinh")
return Array._new(np.sinh(x._array))
def square(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.square <numpy.square>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in square")
return Array._new(np.square(x._array))
def sqrt(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.sqrt <numpy.sqrt>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in sqrt")
return Array._new(np.sqrt(x._array))
def subtract(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.subtract <numpy.subtract>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in subtract")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.subtract(x1._array, x2._array))
def tan(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.tan <numpy.tan>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in tan")
return Array._new(np.tan(x._array))
def tanh(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.tanh <numpy.tanh>`.
See its docstring for more information.
"""
if x.dtype not in _floating_dtypes:
raise TypeError("Only floating-point dtypes are allowed in tanh")
return Array._new(np.tanh(x._array))
def trunc(x: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.trunc <numpy.trunc>`.
See its docstring for more information.
"""
if x.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in trunc")
if x.dtype in _integer_dtypes:
# Note: The return dtype of trunc is the same as the input
return x
return Array._new(np.trunc(x._array))
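# A minimal sketch (not part of the upstream module) of the integer
# passthrough noted above: ceil, floor, and trunc return integer inputs
# unchanged, while float inputs are rounded as usual. The _demo_* name is
# hypothetical.
def _demo_integer_passthrough():
    import numpy.array_api as xp
    i = xp.asarray([1, 2], dtype=xp.int32)
    f = xp.asarray([1.5, -1.5])
    assert xp.trunc(i) is i                  # returned as-is, same object
    assert float(xp.trunc(f)[0]) == 1.0      # truncates toward zero
    assert float(xp.floor(f)[1]) == -2.0     # floors toward -inf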
| 24,772 | Python | 32.935616 | 87 | 0.662926 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/_dtypes.py | import numpy as np
# Note: we use dtype objects instead of dtype classes. The spec does not
# require any behavior on dtypes other than equality.
int8 = np.dtype("int8")
int16 = np.dtype("int16")
int32 = np.dtype("int32")
int64 = np.dtype("int64")
uint8 = np.dtype("uint8")
uint16 = np.dtype("uint16")
uint32 = np.dtype("uint32")
uint64 = np.dtype("uint64")
float32 = np.dtype("float32")
float64 = np.dtype("float64")
# Note: This name is changed (the spec calls this dtype ``bool``, which
# shadows the Python builtin)
bool = np.dtype("bool")
_all_dtypes = (
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
bool,
)
_boolean_dtypes = (bool,)
_floating_dtypes = (float32, float64)
_integer_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)
_integer_or_boolean_dtypes = (
bool,
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
)
_numeric_dtypes = (
float32,
float64,
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
)
_dtype_categories = {
"all": _all_dtypes,
"numeric": _numeric_dtypes,
"integer": _integer_dtypes,
"integer or boolean": _integer_or_boolean_dtypes,
"boolean": _boolean_dtypes,
"floating-point": _floating_dtypes,
}
# Note: the spec defines a restricted type promotion table compared to NumPy.
# In particular, cross-kind promotions like integer + float or boolean +
# integer are not allowed, even for functions that accept both kinds.
# Additionally, NumPy promotes signed integer + uint64 to float64, but this
# promotion is not allowed here. To be clear, Python scalar int objects are
# allowed to promote to floating-point dtypes, but only in array operators
# (see the Array._promote_scalar method in _array_object.py).
_promotion_table = {
(int8, int8): int8,
(int8, int16): int16,
(int8, int32): int32,
(int8, int64): int64,
(int16, int8): int16,
(int16, int16): int16,
(int16, int32): int32,
(int16, int64): int64,
(int32, int8): int32,
(int32, int16): int32,
(int32, int32): int32,
(int32, int64): int64,
(int64, int8): int64,
(int64, int16): int64,
(int64, int32): int64,
(int64, int64): int64,
(uint8, uint8): uint8,
(uint8, uint16): uint16,
(uint8, uint32): uint32,
(uint8, uint64): uint64,
(uint16, uint8): uint16,
(uint16, uint16): uint16,
(uint16, uint32): uint32,
(uint16, uint64): uint64,
(uint32, uint8): uint32,
(uint32, uint16): uint32,
(uint32, uint32): uint32,
(uint32, uint64): uint64,
(uint64, uint8): uint64,
(uint64, uint16): uint64,
(uint64, uint32): uint64,
(uint64, uint64): uint64,
(int8, uint8): int16,
(int8, uint16): int32,
(int8, uint32): int64,
(int16, uint8): int16,
(int16, uint16): int32,
(int16, uint32): int64,
(int32, uint8): int32,
(int32, uint16): int32,
(int32, uint32): int64,
(int64, uint8): int64,
(int64, uint16): int64,
(int64, uint32): int64,
(uint8, int8): int16,
(uint16, int8): int32,
(uint32, int8): int64,
(uint8, int16): int16,
(uint16, int16): int32,
(uint32, int16): int64,
(uint8, int32): int32,
(uint16, int32): int32,
(uint32, int32): int64,
(uint8, int64): int64,
(uint16, int64): int64,
(uint32, int64): int64,
(float32, float32): float32,
(float32, float64): float64,
(float64, float32): float64,
(float64, float64): float64,
(bool, bool): bool,
}
def _result_type(type1, type2):
if (type1, type2) in _promotion_table:
return _promotion_table[type1, type2]
raise TypeError(f"{type1} and {type2} cannot be type promoted together")
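# A minimal sketch (not part of the upstream module) of a direct table
# lookup: the allowed promotions are exactly the pairs listed above, nothing
# more. Uses the module-level names defined above; the _demo_* name is
# hypothetical.
def _demo_promotion_table():
    assert _result_type(int8, uint8) == int16        # listed mixed pairing
    assert (int64, uint64) not in _promotion_table   # NumPy would give float64
    try:
        _result_type(int64, uint64)
    except TypeError:
        return True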
| 3,707 | Python | 24.75 | 77 | 0.623415 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/tests/test_elementwise_functions.py | from inspect import getfullargspec
from numpy.testing import assert_raises
from .. import asarray, _elementwise_functions
from .._elementwise_functions import bitwise_left_shift, bitwise_right_shift
from .._dtypes import (
_dtype_categories,
_boolean_dtypes,
_floating_dtypes,
_integer_dtypes,
)
def nargs(func):
return len(getfullargspec(func).args)
def test_function_types():
# Test that every function accepts only the required input types. We only
    # test the negative (error) cases here. The positive cases are tested in
# the array API test suite.
elementwise_function_input_types = {
"abs": "numeric",
"acos": "floating-point",
"acosh": "floating-point",
"add": "numeric",
"asin": "floating-point",
"asinh": "floating-point",
"atan": "floating-point",
"atan2": "floating-point",
"atanh": "floating-point",
"bitwise_and": "integer or boolean",
"bitwise_invert": "integer or boolean",
"bitwise_left_shift": "integer",
"bitwise_or": "integer or boolean",
"bitwise_right_shift": "integer",
"bitwise_xor": "integer or boolean",
"ceil": "numeric",
"cos": "floating-point",
"cosh": "floating-point",
"divide": "floating-point",
"equal": "all",
"exp": "floating-point",
"expm1": "floating-point",
"floor": "numeric",
"floor_divide": "numeric",
"greater": "numeric",
"greater_equal": "numeric",
"isfinite": "numeric",
"isinf": "numeric",
"isnan": "numeric",
"less": "numeric",
"less_equal": "numeric",
"log": "floating-point",
"logaddexp": "floating-point",
"log10": "floating-point",
"log1p": "floating-point",
"log2": "floating-point",
"logical_and": "boolean",
"logical_not": "boolean",
"logical_or": "boolean",
"logical_xor": "boolean",
"multiply": "numeric",
"negative": "numeric",
"not_equal": "all",
"positive": "numeric",
"pow": "numeric",
"remainder": "numeric",
"round": "numeric",
"sign": "numeric",
"sin": "floating-point",
"sinh": "floating-point",
"sqrt": "floating-point",
"square": "numeric",
"subtract": "numeric",
"tan": "floating-point",
"tanh": "floating-point",
"trunc": "numeric",
}
def _array_vals():
for d in _integer_dtypes:
yield asarray(1, dtype=d)
for d in _boolean_dtypes:
yield asarray(False, dtype=d)
for d in _floating_dtypes:
yield asarray(1.0, dtype=d)
for x in _array_vals():
for func_name, types in elementwise_function_input_types.items():
dtypes = _dtype_categories[types]
func = getattr(_elementwise_functions, func_name)
if nargs(func) == 2:
for y in _array_vals():
if x.dtype not in dtypes or y.dtype not in dtypes:
assert_raises(TypeError, lambda: func(x, y))
else:
if x.dtype not in dtypes:
assert_raises(TypeError, lambda: func(x))
def test_bitwise_shift_error():
# bitwise shift functions should raise when the second argument is negative
assert_raises(
ValueError, lambda: bitwise_left_shift(asarray([1, 1]), asarray([1, -1]))
)
assert_raises(
ValueError, lambda: bitwise_right_shift(asarray([1, 1]), asarray([1, -1]))
)
| 3,619 | Python | 31.321428 | 82 | 0.554849 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/tests/test_array_object.py | import operator
from numpy.testing import assert_raises
import numpy as np
import pytest
from .. import ones, asarray, reshape, result_type, all, equal
from .._array_object import Array
from .._dtypes import (
_all_dtypes,
_boolean_dtypes,
_floating_dtypes,
_integer_dtypes,
_integer_or_boolean_dtypes,
_numeric_dtypes,
int8,
int16,
int32,
int64,
uint64,
bool as bool_,
)
def test_validate_index():
# The indexing tests in the official array API test suite test that the
# array object correctly handles the subset of indices that are required
# by the spec. But the NumPy array API implementation specifically
# disallows any index not required by the spec, via Array._validate_index.
# This test focuses on testing that non-valid indices are correctly
# rejected. See
# https://data-apis.org/array-api/latest/API_specification/indexing.html
# and the docstring of Array._validate_index for the exact indexing
# behavior that should be allowed. This does not test indices that are
# already invalid in NumPy itself because Array will generally just pass
# such indices directly to the underlying np.ndarray.
a = ones((3, 4))
# Out of bounds slices are not allowed
assert_raises(IndexError, lambda: a[:4])
assert_raises(IndexError, lambda: a[:-4])
assert_raises(IndexError, lambda: a[:3:-1])
assert_raises(IndexError, lambda: a[:-5:-1])
assert_raises(IndexError, lambda: a[4:])
assert_raises(IndexError, lambda: a[-4:])
assert_raises(IndexError, lambda: a[4::-1])
assert_raises(IndexError, lambda: a[-4::-1])
assert_raises(IndexError, lambda: a[...,:5])
assert_raises(IndexError, lambda: a[...,:-5])
assert_raises(IndexError, lambda: a[...,:5:-1])
assert_raises(IndexError, lambda: a[...,:-6:-1])
assert_raises(IndexError, lambda: a[...,5:])
assert_raises(IndexError, lambda: a[...,-5:])
assert_raises(IndexError, lambda: a[...,5::-1])
assert_raises(IndexError, lambda: a[...,-5::-1])
# Boolean indices cannot be part of a larger tuple index
assert_raises(IndexError, lambda: a[a[:,0]==1,0])
assert_raises(IndexError, lambda: a[a[:,0]==1,...])
assert_raises(IndexError, lambda: a[..., a[0]==1])
assert_raises(IndexError, lambda: a[[True, True, True]])
assert_raises(IndexError, lambda: a[(True, True, True),])
# Integer array indices are not allowed (except for 0-D)
idx = asarray([[0, 1]])
assert_raises(IndexError, lambda: a[idx])
assert_raises(IndexError, lambda: a[idx,])
assert_raises(IndexError, lambda: a[[0, 1]])
assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])
assert_raises(IndexError, lambda: a[[0, 1]])
assert_raises(IndexError, lambda: a[np.array([[0, 1]])])
# Multiaxis indices must contain exactly as many indices as dimensions
assert_raises(IndexError, lambda: a[()])
assert_raises(IndexError, lambda: a[0,])
assert_raises(IndexError, lambda: a[0])
assert_raises(IndexError, lambda: a[:])
def test_operators():
# For every operator, we test that it works for the required type
# combinations and raises TypeError otherwise
binary_op_dtypes = {
"__add__": "numeric",
"__and__": "integer_or_boolean",
"__eq__": "all",
"__floordiv__": "numeric",
"__ge__": "numeric",
"__gt__": "numeric",
"__le__": "numeric",
"__lshift__": "integer",
"__lt__": "numeric",
"__mod__": "numeric",
"__mul__": "numeric",
"__ne__": "all",
"__or__": "integer_or_boolean",
"__pow__": "numeric",
"__rshift__": "integer",
"__sub__": "numeric",
"__truediv__": "floating",
"__xor__": "integer_or_boolean",
}
# Recompute each time because of in-place ops
def _array_vals():
for d in _integer_dtypes:
yield asarray(1, dtype=d)
for d in _boolean_dtypes:
yield asarray(False, dtype=d)
for d in _floating_dtypes:
yield asarray(1.0, dtype=d)
for op, dtypes in binary_op_dtypes.items():
ops = [op]
if op not in ["__eq__", "__ne__", "__le__", "__ge__", "__lt__", "__gt__"]:
rop = "__r" + op[2:]
iop = "__i" + op[2:]
ops += [rop, iop]
for s in [1, 1.0, False]:
for _op in ops:
for a in _array_vals():
# Test array op scalar. From the spec, the following combinations
# are supported:
# - Python bool for a bool array dtype,
# - a Python int within the bounds of the given dtype for integer array dtypes,
# - a Python int or float for floating-point array dtypes
# We do not do bounds checking for int scalars, but rather use the default
# NumPy behavior for casting in that case.
if ((dtypes == "all"
or dtypes == "numeric" and a.dtype in _numeric_dtypes
or dtypes == "integer" and a.dtype in _integer_dtypes
or dtypes == "integer_or_boolean" and a.dtype in _integer_or_boolean_dtypes
or dtypes == "boolean" and a.dtype in _boolean_dtypes
or dtypes == "floating" and a.dtype in _floating_dtypes
)
# bool is a subtype of int, which is why we avoid
# isinstance here.
and (a.dtype in _boolean_dtypes and type(s) == bool
or a.dtype in _integer_dtypes and type(s) == int
or a.dtype in _floating_dtypes and type(s) in [float, int]
)):
# Only test for no error
getattr(a, _op)(s)
else:
assert_raises(TypeError, lambda: getattr(a, _op)(s))
# Test array op array.
for _op in ops:
for x in _array_vals():
for y in _array_vals():
# See the promotion table in NEP 47 or the array
# API spec page on type promotion. Mixed kind
# promotion is not defined.
if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
or x.dtype in _boolean_dtypes and y.dtype not in _boolean_dtypes
or y.dtype in _boolean_dtypes and x.dtype not in _boolean_dtypes
or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
):
assert_raises(TypeError, lambda: getattr(x, _op)(y))
# Ensure in-place operators only promote to the same dtype as the left operand.
elif (
_op.startswith("__i")
and result_type(x.dtype, y.dtype) != x.dtype
):
assert_raises(TypeError, lambda: getattr(x, _op)(y))
                    # Ensure that only the dtypes required by each operator are allowed.
elif (dtypes == "all" and (x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
or x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
or (dtypes == "numeric" and x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
or dtypes == "integer" and x.dtype in _integer_dtypes and y.dtype in _numeric_dtypes
or dtypes == "integer_or_boolean" and (x.dtype in _integer_dtypes and y.dtype in _integer_dtypes
or x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes)
or dtypes == "boolean" and x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
or dtypes == "floating" and x.dtype in _floating_dtypes and y.dtype in _floating_dtypes
):
getattr(x, _op)(y)
else:
assert_raises(TypeError, lambda: getattr(x, _op)(y))
unary_op_dtypes = {
"__abs__": "numeric",
"__invert__": "integer_or_boolean",
"__neg__": "numeric",
"__pos__": "numeric",
}
for op, dtypes in unary_op_dtypes.items():
for a in _array_vals():
if (
dtypes == "numeric"
and a.dtype in _numeric_dtypes
or dtypes == "integer_or_boolean"
and a.dtype in _integer_or_boolean_dtypes
):
# Only test for no error
getattr(a, op)()
else:
assert_raises(TypeError, lambda: getattr(a, op)())
    # Finally, matmul() must be tested separately, because it works a bit
    # differently from the other operations.
def _matmul_array_vals():
for a in _array_vals():
yield a
for d in _all_dtypes:
yield ones((3, 4), dtype=d)
yield ones((4, 2), dtype=d)
yield ones((4, 4), dtype=d)
# Scalars always error
for _op in ["__matmul__", "__rmatmul__", "__imatmul__"]:
for s in [1, 1.0, False]:
for a in _matmul_array_vals():
if (type(s) in [float, int] and a.dtype in _floating_dtypes
or type(s) == int and a.dtype in _integer_dtypes):
# Type promotion is valid, but @ is not allowed on 0-D
# inputs, so the error is a ValueError
assert_raises(ValueError, lambda: getattr(a, _op)(s))
else:
assert_raises(TypeError, lambda: getattr(a, _op)(s))
for x in _matmul_array_vals():
for y in _matmul_array_vals():
if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
or x.dtype in _boolean_dtypes
or y.dtype in _boolean_dtypes
):
assert_raises(TypeError, lambda: x.__matmul__(y))
assert_raises(TypeError, lambda: y.__rmatmul__(x))
assert_raises(TypeError, lambda: x.__imatmul__(y))
elif x.shape == () or y.shape == () or x.shape[1] != y.shape[0]:
assert_raises(ValueError, lambda: x.__matmul__(y))
assert_raises(ValueError, lambda: y.__rmatmul__(x))
if result_type(x.dtype, y.dtype) != x.dtype:
assert_raises(TypeError, lambda: x.__imatmul__(y))
else:
assert_raises(ValueError, lambda: x.__imatmul__(y))
else:
x.__matmul__(y)
y.__rmatmul__(x)
if result_type(x.dtype, y.dtype) != x.dtype:
assert_raises(TypeError, lambda: x.__imatmul__(y))
elif y.shape[0] != y.shape[1]:
# This one fails because x @ y has a different shape from x
assert_raises(ValueError, lambda: x.__imatmul__(y))
else:
x.__imatmul__(y)
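# Minimal illustrative check (added sketch, not part of the upstream suite;
# it reuses the module's existing imports such as asarray, int8 and
# assert_raises): mixed-kind promotion is undefined in the array API, so
# adding integer and floating-point arrays must raise even though NumPy
# proper would silently promote the result.
def test_mixed_kind_promotion_raises_sketch():
    x = asarray(1, dtype=int8)  # integer kind
    y = asarray(1.0)            # floating-point kind (default dtype)
    assert_raises(TypeError, lambda: x + y)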
def test_python_scalar_constructors():
b = asarray(False)
i = asarray(0)
f = asarray(0.0)
assert bool(b) == False
assert int(i) == 0
assert float(f) == 0.0
assert operator.index(i) == 0
# bool/int/float should only be allowed on 0-D arrays.
assert_raises(TypeError, lambda: bool(asarray([False])))
assert_raises(TypeError, lambda: int(asarray([0])))
assert_raises(TypeError, lambda: float(asarray([0.0])))
assert_raises(TypeError, lambda: operator.index(asarray([0])))
# bool/int/float should only be allowed on arrays of the corresponding
# dtype
assert_raises(ValueError, lambda: bool(i))
assert_raises(ValueError, lambda: bool(f))
assert_raises(ValueError, lambda: int(b))
assert_raises(ValueError, lambda: int(f))
assert_raises(ValueError, lambda: float(b))
assert_raises(ValueError, lambda: float(i))
assert_raises(TypeError, lambda: operator.index(b))
assert_raises(TypeError, lambda: operator.index(f))
def test_device_property():
a = ones((3, 4))
assert a.device == 'cpu'
assert all(equal(a.to_device('cpu'), a))
assert_raises(ValueError, lambda: a.to_device('gpu'))
assert all(equal(asarray(a, device='cpu'), a))
assert_raises(ValueError, lambda: asarray(a, device='gpu'))
def test_array_properties():
a = ones((1, 2, 3))
b = ones((2, 3))
assert_raises(ValueError, lambda: a.T)
assert isinstance(b.T, Array)
assert b.T.shape == (3, 2)
assert isinstance(a.mT, Array)
assert a.mT.shape == (1, 3, 2)
assert isinstance(b.mT, Array)
assert b.mT.shape == (3, 2)
def test___array__():
a = ones((2, 3), dtype=int16)
assert np.asarray(a) is a._array
b = np.asarray(a, dtype=np.float64)
assert np.all(np.equal(b, np.ones((2, 3), dtype=np.float64)))
assert b.dtype == np.float64
def test_allow_newaxis():
a = ones(5)
indexed_a = a[None, :]
assert indexed_a.shape == (1, 5)
def test_disallow_flat_indexing_with_newaxis():
a = ones((3, 3, 3))
with pytest.raises(IndexError):
a[None, 0, 0]
def test_disallow_mask_with_newaxis():
a = ones((3, 3, 3))
with pytest.raises(IndexError):
a[None, asarray(True)]
@pytest.mark.parametrize("shape", [(), (5,), (3, 3, 3)])
@pytest.mark.parametrize("index", ["string", False, True])
def test_error_on_invalid_index(shape, index):
a = ones(shape)
with pytest.raises(IndexError):
a[index]
def test_mask_0d_array_without_errors():
a = ones(())
a[asarray(True)]
@pytest.mark.parametrize(
"i", [slice(5), slice(5, 0), asarray(True), asarray([0, 1])]
)
def test_error_on_invalid_index_with_ellipsis(i):
a = ones((3, 3, 3))
with pytest.raises(IndexError):
a[..., i]
with pytest.raises(IndexError):
a[i, ...]
def test_array_keys_use_private_array():
"""
    Indexing operations convert array keys before indexing the internal array.
    Fails when array_api array keys are not converted into NumPy-proper arrays
    in __getitem__(). This is achieved by passing array_api arrays with 0-sized
    dimensions, which NumPy-proper treats erroneously - not sure why!
    TODO: Find and use an appropriate __setitem__() case.
"""
a = ones((0, 0), dtype=bool_)
assert a[a].shape == (0,)
a = ones((0,), dtype=bool_)
key = ones((0, 0), dtype=bool_)
with pytest.raises(IndexError):
a[key]
| 15,770 | Python | 40.944149 | 132 | 0.54293 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/tests/test_sorting_functions.py | import pytest
from numpy import array_api as xp
@pytest.mark.parametrize(
"obj, axis, expected",
[
([0, 0], -1, [0, 1]),
([0, 1, 0], -1, [1, 0, 2]),
([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]),
([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]),
],
)
def test_stable_desc_argsort(obj, axis, expected):
"""
Indices respect relative order of a descending stable-sort
See https://github.com/numpy/numpy/issues/20778
"""
x = xp.asarray(obj)
out = xp.argsort(x, axis=axis, stable=True, descending=True)
assert xp.all(out == xp.asarray(expected))
| 602 | Python | 24.124999 | 64 | 0.533223 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/tests/__init__.py | """
Tests for the array API namespace.
Note that full compliance with the array API can be tested with the official
array API test suite at https://github.com/data-apis/array-api-tests. The tests
here focus on behaviour that the official suite does not cover.
"""
| 282 | Python | 34.374996 | 87 | 0.776596 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/tests/test_validation.py | from typing import Callable
import pytest
from numpy import array_api as xp
def p(func: Callable, *args, **kwargs):
f_sig = ", ".join(
[str(a) for a in args] + [f"{k}={v}" for k, v in kwargs.items()]
)
id_ = f"{func.__name__}({f_sig})"
return pytest.param(func, args, kwargs, id=id_)
@pytest.mark.parametrize(
"func, args, kwargs",
[
p(xp.can_cast, 42, xp.int8),
p(xp.can_cast, xp.int8, 42),
p(xp.result_type, 42),
],
)
def test_raises_on_invalid_types(func, args, kwargs):
"""Function raises TypeError when passed invalidly-typed inputs"""
with pytest.raises(TypeError):
func(*args, **kwargs)
| 676 | Python | 23.178571 | 72 | 0.593195 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/tests/test_data_type_functions.py | import pytest
from numpy import array_api as xp
@pytest.mark.parametrize(
"from_, to, expected",
[
(xp.int8, xp.int16, True),
(xp.int16, xp.int8, False),
(xp.bool, xp.int8, False),
(xp.asarray(0, dtype=xp.uint8), xp.int8, False),
],
)
def test_can_cast(from_, to, expected):
"""
can_cast() returns correct result
"""
assert xp.can_cast(from_, to) == expected
| 422 | Python | 20.149999 | 56 | 0.575829 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/tests/test_creation_functions.py | from numpy.testing import assert_raises
import numpy as np
from .. import all
from .._creation_functions import (
asarray,
arange,
empty,
empty_like,
eye,
full,
full_like,
linspace,
meshgrid,
ones,
ones_like,
zeros,
zeros_like,
)
from .._dtypes import float32, float64
from .._array_object import Array
def test_asarray_errors():
# Test various protections against incorrect usage
assert_raises(TypeError, lambda: Array([1]))
assert_raises(TypeError, lambda: asarray(["a"]))
assert_raises(ValueError, lambda: asarray([1.0], dtype=np.float16))
assert_raises(OverflowError, lambda: asarray(2**100))
# Preferably this would be OverflowError
# assert_raises(OverflowError, lambda: asarray([2**100]))
assert_raises(TypeError, lambda: asarray([2**100]))
asarray([1], device="cpu") # Doesn't error
assert_raises(ValueError, lambda: asarray([1], device="gpu"))
assert_raises(ValueError, lambda: asarray([1], dtype=int))
assert_raises(ValueError, lambda: asarray([1], dtype="i"))
def test_asarray_copy():
a = asarray([1])
b = asarray(a, copy=True)
a[0] = 0
assert all(b[0] == 1)
assert all(a[0] == 0)
a = asarray([1])
b = asarray(a, copy=np._CopyMode.ALWAYS)
a[0] = 0
assert all(b[0] == 1)
assert all(a[0] == 0)
a = asarray([1])
b = asarray(a, copy=np._CopyMode.NEVER)
a[0] = 0
assert all(b[0] == 0)
assert_raises(NotImplementedError, lambda: asarray(a, copy=False))
assert_raises(NotImplementedError,
lambda: asarray(a, copy=np._CopyMode.IF_NEEDED))
def test_arange_errors():
arange(1, device="cpu") # Doesn't error
assert_raises(ValueError, lambda: arange(1, device="gpu"))
assert_raises(ValueError, lambda: arange(1, dtype=int))
assert_raises(ValueError, lambda: arange(1, dtype="i"))
def test_empty_errors():
empty((1,), device="cpu") # Doesn't error
assert_raises(ValueError, lambda: empty((1,), device="gpu"))
assert_raises(ValueError, lambda: empty((1,), dtype=int))
assert_raises(ValueError, lambda: empty((1,), dtype="i"))
def test_empty_like_errors():
empty_like(asarray(1), device="cpu") # Doesn't error
assert_raises(ValueError, lambda: empty_like(asarray(1), device="gpu"))
assert_raises(ValueError, lambda: empty_like(asarray(1), dtype=int))
assert_raises(ValueError, lambda: empty_like(asarray(1), dtype="i"))
def test_eye_errors():
eye(1, device="cpu") # Doesn't error
assert_raises(ValueError, lambda: eye(1, device="gpu"))
assert_raises(ValueError, lambda: eye(1, dtype=int))
assert_raises(ValueError, lambda: eye(1, dtype="i"))
def test_full_errors():
full((1,), 0, device="cpu") # Doesn't error
assert_raises(ValueError, lambda: full((1,), 0, device="gpu"))
assert_raises(ValueError, lambda: full((1,), 0, dtype=int))
assert_raises(ValueError, lambda: full((1,), 0, dtype="i"))
def test_full_like_errors():
full_like(asarray(1), 0, device="cpu") # Doesn't error
assert_raises(ValueError, lambda: full_like(asarray(1), 0, device="gpu"))
assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype=int))
assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype="i"))
def test_linspace_errors():
linspace(0, 1, 10, device="cpu") # Doesn't error
assert_raises(ValueError, lambda: linspace(0, 1, 10, device="gpu"))
assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype=float))
assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype="f"))
def test_ones_errors():
ones((1,), device="cpu") # Doesn't error
assert_raises(ValueError, lambda: ones((1,), device="gpu"))
assert_raises(ValueError, lambda: ones((1,), dtype=int))
assert_raises(ValueError, lambda: ones((1,), dtype="i"))
def test_ones_like_errors():
ones_like(asarray(1), device="cpu") # Doesn't error
assert_raises(ValueError, lambda: ones_like(asarray(1), device="gpu"))
assert_raises(ValueError, lambda: ones_like(asarray(1), dtype=int))
assert_raises(ValueError, lambda: ones_like(asarray(1), dtype="i"))
def test_zeros_errors():
zeros((1,), device="cpu") # Doesn't error
assert_raises(ValueError, lambda: zeros((1,), device="gpu"))
assert_raises(ValueError, lambda: zeros((1,), dtype=int))
assert_raises(ValueError, lambda: zeros((1,), dtype="i"))
def test_zeros_like_errors():
zeros_like(asarray(1), device="cpu") # Doesn't error
assert_raises(ValueError, lambda: zeros_like(asarray(1), device="gpu"))
assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype=int))
assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype="i"))
def test_meshgrid_dtype_errors():
# Doesn't raise
meshgrid()
meshgrid(asarray([1.], dtype=float32))
meshgrid(asarray([1.], dtype=float32), asarray([1.], dtype=float32))
assert_raises(ValueError, lambda: meshgrid(asarray([1.], dtype=float32), asarray([1.], dtype=float64)))
| 5,023 | Python | 34.132867 | 107 | 0.655982 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/array_api/tests/test_set_functions.py | import pytest
from hypothesis import given
from hypothesis.extra.array_api import make_strategies_namespace
from numpy import array_api as xp
xps = make_strategies_namespace(xp)
@pytest.mark.parametrize("func", [xp.unique_all, xp.unique_inverse])
@given(xps.arrays(dtype=xps.scalar_dtypes(), shape=xps.array_shapes()))
def test_inverse_indices_shape(func, x):
"""
Inverse indices share shape of input array
See https://github.com/numpy/numpy/issues/20638
"""
out = func(x)
assert out.inverse_indices.shape == x.shape
| 546 | Python | 26.349999 | 71 | 0.730769 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna/intranges.py | """
Given a list of integers, made up of (hopefully) a small number of long runs
of consecutive integers, compute a representation of the form
((start1, end1), (start2, end2) ...). Then answer the question "was x present
in the original list?" in time O(log(# runs)).
"""
import bisect
from typing import List, Tuple
def intranges_from_list(list_: List[int]) -> Tuple[int, ...]:
"""Represent a list of integers as a sequence of ranges:
((start_0, end_0), (start_1, end_1), ...), such that the original
integers are exactly those x such that start_i <= x < end_i for some i.
Ranges are encoded as single integers (start << 32 | end), not as tuples.
"""
sorted_list = sorted(list_)
ranges = []
last_write = -1
for i in range(len(sorted_list)):
if i+1 < len(sorted_list):
if sorted_list[i] == sorted_list[i+1]-1:
continue
current_range = sorted_list[last_write+1:i+1]
ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
last_write = i
return tuple(ranges)
def _encode_range(start: int, end: int) -> int:
return (start << 32) | end
def _decode_range(r: int) -> Tuple[int, int]:
return (r >> 32), (r & ((1 << 32) - 1))
def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool:
"""Determine if `int_` falls into one of the ranges in `ranges`."""
tuple_ = _encode_range(int_, 0)
pos = bisect.bisect_left(ranges, tuple_)
    # we could be immediately ahead of a tuple (start, end)
    # with start <= int_ < end
if pos > 0:
left, right = _decode_range(ranges[pos-1])
if left <= int_ < right:
return True
# or we could be immediately behind a tuple (int_, end)
if pos < len(ranges):
left, _ = _decode_range(ranges[pos])
if left == int_:
return True
return False
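# Illustrative usage sketch (added example, not part of the upstream module):
# each run of consecutive integers collapses into one encoded half-open range,
# so a membership query is a single bisection over the sorted encodings.
if __name__ == '__main__':
    _ranges = intranges_from_list([3, 4, 5, 9])
    assert [_decode_range(r) for r in _ranges] == [(3, 6), (9, 10)]
    assert intranges_contain(4, _ranges)
    assert not intranges_contain(6, _ranges)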
| 1,881 | Python | 33.218181 | 77 | 0.600744 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna/__init__.py | from .package_data import __version__
from .core import (
IDNABidiError,
IDNAError,
InvalidCodepoint,
InvalidCodepointContext,
alabel,
check_bidi,
check_hyphen_ok,
check_initial_combiner,
check_label,
check_nfc,
decode,
encode,
ulabel,
uts46_remap,
valid_contextj,
valid_contexto,
valid_label_length,
valid_string_length,
)
from .intranges import intranges_contain
__all__ = [
"IDNABidiError",
"IDNAError",
"InvalidCodepoint",
"InvalidCodepointContext",
"alabel",
"check_bidi",
"check_hyphen_ok",
"check_initial_combiner",
"check_label",
"check_nfc",
"decode",
"encode",
"intranges_contain",
"ulabel",
"uts46_remap",
"valid_contextj",
"valid_contexto",
"valid_label_length",
"valid_string_length",
]
| 849 | Python | 17.888888 | 40 | 0.617197 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna/core.py | from . import idnadata
import bisect
import unicodedata
import re
from typing import Union, Optional
from .intranges import intranges_contain
_virama_combining_class = 9
_alabel_prefix = b'xn--'
_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
class IDNAError(UnicodeError):
""" Base exception for all IDNA-encoding related problems """
pass
class IDNABidiError(IDNAError):
""" Exception when bidirectional requirements are not satisfied """
pass
class InvalidCodepoint(IDNAError):
""" Exception when a disallowed or unallocated codepoint is used """
pass
class InvalidCodepointContext(IDNAError):
""" Exception when the codepoint is not valid in the context it is used """
pass
def _combining_class(cp: int) -> int:
v = unicodedata.combining(chr(cp))
if v == 0:
if not unicodedata.name(chr(cp)):
raise ValueError('Unknown character in unicodedata')
return v
def _is_script(cp: str, script: str) -> bool:
return intranges_contain(ord(cp), idnadata.scripts[script])
def _punycode(s: str) -> bytes:
return s.encode('punycode')
def _unot(s: int) -> str:
return 'U+{:04X}'.format(s)
def valid_label_length(label: Union[bytes, str]) -> bool:
if len(label) > 63:
return False
return True
def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool:
if len(label) > (254 if trailing_dot else 253):
return False
return True
def check_bidi(label: str, check_ltr: bool = False) -> bool:
# Bidi rules should only be applied if string contains RTL characters
bidi_label = False
for (idx, cp) in enumerate(label, 1):
direction = unicodedata.bidirectional(cp)
if direction == '':
# String likely comes from a newer version of Unicode
raise IDNABidiError('Unknown directionality in label {} at position {}'.format(repr(label), idx))
if direction in ['R', 'AL', 'AN']:
bidi_label = True
if not bidi_label and not check_ltr:
return True
# Bidi rule 1
direction = unicodedata.bidirectional(label[0])
if direction in ['R', 'AL']:
rtl = True
elif direction == 'L':
rtl = False
else:
raise IDNABidiError('First codepoint in label {} must be directionality L, R or AL'.format(repr(label)))
valid_ending = False
number_type = None # type: Optional[str]
for (idx, cp) in enumerate(label, 1):
direction = unicodedata.bidirectional(cp)
if rtl:
# Bidi rule 2
            if direction not in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
raise IDNABidiError('Invalid direction for codepoint at position {} in a right-to-left label'.format(idx))
# Bidi rule 3
if direction in ['R', 'AL', 'EN', 'AN']:
valid_ending = True
elif direction != 'NSM':
valid_ending = False
# Bidi rule 4
if direction in ['AN', 'EN']:
if not number_type:
number_type = direction
else:
if number_type != direction:
raise IDNABidiError('Can not mix numeral types in a right-to-left label')
else:
# Bidi rule 5
            if direction not in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
raise IDNABidiError('Invalid direction for codepoint at position {} in a left-to-right label'.format(idx))
# Bidi rule 6
if direction in ['L', 'EN']:
valid_ending = True
elif direction != 'NSM':
valid_ending = False
if not valid_ending:
raise IDNABidiError('Label ends with illegal codepoint directionality')
return True
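# Illustrative sketch (added example, not part of upstream idna): an all-RTL
# label satisfies the Bidi rules, while an RTL label ending in a
# left-to-right letter violates rule 2:
#   >>> check_bidi('\u05d0\u05d1')  # Hebrew alef + bet
#   True
#   >>> check_bidi('\u05d0a')
#   Traceback (most recent call last):
#     ...
#   IDNABidiError: Invalid direction for codepoint at position 2 in a right-to-left label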
def check_initial_combiner(label: str) -> bool:
if unicodedata.category(label[0])[0] == 'M':
raise IDNAError('Label begins with an illegal combining character')
return True
def check_hyphen_ok(label: str) -> bool:
if label[2:4] == '--':
raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
if label[0] == '-' or label[-1] == '-':
raise IDNAError('Label must not start or end with a hyphen')
return True
def check_nfc(label: str) -> None:
if unicodedata.normalize('NFC', label) != label:
raise IDNAError('Label must be in Normalization Form C')
def valid_contextj(label: str, pos: int) -> bool:
cp_value = ord(label[pos])
if cp_value == 0x200c:
if pos > 0:
if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
return True
ok = False
for i in range(pos-1, -1, -1):
joining_type = idnadata.joining_types.get(ord(label[i]))
if joining_type == ord('T'):
continue
if joining_type in [ord('L'), ord('D')]:
ok = True
break
if not ok:
return False
ok = False
for i in range(pos+1, len(label)):
joining_type = idnadata.joining_types.get(ord(label[i]))
if joining_type == ord('T'):
continue
if joining_type in [ord('R'), ord('D')]:
ok = True
break
return ok
if cp_value == 0x200d:
if pos > 0:
if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
return True
return False
else:
return False
def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
cp_value = ord(label[pos])
if cp_value == 0x00b7:
if 0 < pos < len(label)-1:
if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
return True
return False
elif cp_value == 0x0375:
if pos < len(label)-1 and len(label) > 1:
return _is_script(label[pos + 1], 'Greek')
return False
elif cp_value == 0x05f3 or cp_value == 0x05f4:
if pos > 0:
return _is_script(label[pos - 1], 'Hebrew')
return False
elif cp_value == 0x30fb:
for cp in label:
if cp == '\u30fb':
continue
if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
return True
return False
elif 0x660 <= cp_value <= 0x669:
for cp in label:
if 0x6f0 <= ord(cp) <= 0x06f9:
return False
return True
elif 0x6f0 <= cp_value <= 0x6f9:
for cp in label:
if 0x660 <= ord(cp) <= 0x0669:
return False
return True
return False
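# Illustrative sketch (added example, not part of upstream idna): the Catalan
# middle dot U+00B7 is CONTEXTO-valid only between two 'l' characters:
#   >>> valid_contexto('l\u00b7l', 1)
#   True
#   >>> valid_contexto('a\u00b7b', 1)
#   False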
def check_label(label: Union[str, bytes, bytearray]) -> None:
if isinstance(label, (bytes, bytearray)):
label = label.decode('utf-8')
if len(label) == 0:
raise IDNAError('Empty Label')
check_nfc(label)
check_hyphen_ok(label)
check_initial_combiner(label)
for (pos, cp) in enumerate(label):
cp_value = ord(cp)
if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
continue
elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
try:
if not valid_contextj(label, pos):
raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format(
_unot(cp_value), pos+1, repr(label)))
except ValueError:
raise IDNAError('Unknown codepoint adjacent to joiner {} at position {} in {}'.format(
_unot(cp_value), pos+1, repr(label)))
elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
if not valid_contexto(label, pos):
raise InvalidCodepointContext('Codepoint {} not allowed at position {} in {}'.format(_unot(cp_value), pos+1, repr(label)))
else:
raise InvalidCodepoint('Codepoint {} at position {} of {} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
check_bidi(label)
def alabel(label: str) -> bytes:
try:
label_bytes = label.encode('ascii')
ulabel(label_bytes)
if not valid_label_length(label_bytes):
raise IDNAError('Label too long')
return label_bytes
except UnicodeEncodeError:
pass
if not label:
raise IDNAError('No Input')
label = str(label)
check_label(label)
label_bytes = _punycode(label)
label_bytes = _alabel_prefix + label_bytes
if not valid_label_length(label_bytes):
raise IDNAError('Label too long')
return label_bytes
def ulabel(label: Union[str, bytes, bytearray]) -> str:
if not isinstance(label, (bytes, bytearray)):
try:
label_bytes = label.encode('ascii')
except UnicodeEncodeError:
check_label(label)
return label
else:
label_bytes = label
label_bytes = label_bytes.lower()
if label_bytes.startswith(_alabel_prefix):
label_bytes = label_bytes[len(_alabel_prefix):]
if not label_bytes:
raise IDNAError('Malformed A-label, no Punycode eligible content found')
if label_bytes.decode('ascii')[-1] == '-':
raise IDNAError('A-label must not end with a hyphen')
else:
check_label(label_bytes)
return label_bytes.decode('ascii')
try:
label = label_bytes.decode('punycode')
except UnicodeError:
raise IDNAError('Invalid A-label')
check_label(label)
return label
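# Illustrative sketch (values taken from the idna README, not part of the
# module itself): alabel() and ulabel() convert one label between its Unicode
# and ACE ("xn--") forms:
#   >>> alabel('\u6d4b\u8bd5')
#   b'xn--0zwm56d'
#   >>> ulabel(b'xn--0zwm56d')
#   '\u6d4b\u8bd5'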
def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str:
"""Re-map the characters in the string according to UTS46 processing."""
from .uts46data import uts46data
output = ''
for pos, char in enumerate(domain):
code_point = ord(char)
try:
uts46row = uts46data[code_point if code_point < 256 else
bisect.bisect_left(uts46data, (code_point, 'Z')) - 1]
status = uts46row[1]
replacement = None # type: Optional[str]
if len(uts46row) == 3:
replacement = uts46row[2] # type: ignore
if (status == 'V' or
(status == 'D' and not transitional) or
(status == '3' and not std3_rules and replacement is None)):
output += char
elif replacement is not None and (status == 'M' or
(status == '3' and not std3_rules) or
(status == 'D' and transitional)):
output += replacement
elif status != 'I':
raise IndexError()
except IndexError:
raise InvalidCodepoint(
'Codepoint {} not allowed at position {} in {}'.format(
_unot(code_point), pos + 1, repr(domain)))
return unicodedata.normalize('NFC', output)
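# Illustrative sketch (assumed example, not part of upstream idna): with the
# default flags, UTS46 remapping case-folds 'mapped' characters before any
# label validation takes place:
#   >>> uts46_remap('K\u00f6nig.DE')
#   'k\u00f6nig.de'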
def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes:
if isinstance(s, (bytes, bytearray)):
try:
s = s.decode('ascii')
except UnicodeDecodeError:
            raise IDNAError('Expected a Unicode string rather than a byte string')
if uts46:
s = uts46_remap(s, std3_rules, transitional)
trailing_dot = False
result = []
if strict:
labels = s.split('.')
else:
labels = _unicode_dots_re.split(s)
if not labels or labels == ['']:
raise IDNAError('Empty domain')
if labels[-1] == '':
del labels[-1]
trailing_dot = True
for label in labels:
s = alabel(label)
if s:
result.append(s)
else:
raise IDNAError('Empty label')
if trailing_dot:
result.append(b'')
s = b'.'.join(result)
if not valid_string_length(s, trailing_dot):
raise IDNAError('Domain too long')
return s
def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False) -> str:
try:
if isinstance(s, (bytes, bytearray)):
s = s.decode('ascii')
except UnicodeDecodeError:
raise IDNAError('Invalid ASCII in A-label')
if uts46:
s = uts46_remap(s, std3_rules, False)
trailing_dot = False
result = []
if not strict:
labels = _unicode_dots_re.split(s)
else:
labels = s.split('.')
if not labels or labels == ['']:
raise IDNAError('Empty domain')
if not labels[-1]:
del labels[-1]
trailing_dot = True
for label in labels:
s = ulabel(label)
if s:
result.append(s)
else:
raise IDNAError('Empty label')
if trailing_dot:
result.append('')
return '.'.join(result)
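# Illustrative usage sketch (round-trip example from the idna README, added
# here for reference; run with `python -m idna.core` since the module uses
# relative imports):
if __name__ == '__main__':
    assert encode('\u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8') == b'xn--eckwd4c7c.xn--zckzah'
    assert decode(b'xn--eckwd4c7c.xn--zckzah') == '\u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8'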
| 12,950 | Python | 31.296758 | 150 | 0.569807 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna/codec.py | from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re
from typing import Tuple, Optional
_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
class Codec(codecs.Codec):
def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]:
if errors != 'strict':
raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
if not data:
return b"", 0
return encode(data), len(data)
def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]:
if errors != 'strict':
raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
if not data:
return '', 0
return decode(data), len(data)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
    def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[bytes, int]:
        if errors != 'strict':
            raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
        if not data:
            return b'', 0
        labels = _unicode_dots_re.split(data)
        trailing_dot = b''
        if labels:
            if not labels[-1]:
                trailing_dot = b'.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = b'.'
        result = []
        size = 0
        for label in labels:
            result.append(alabel(label))
            if size:
                size += 1
            size += len(label)
        # Join with U+002E; alabel() returns bytes, so the separator and the
        # trailing dot must be bytes as well (joining bytes with a str '.'
        # raised TypeError in the original code)
        result_bytes = b'.'.join(result) + trailing_dot
        size += len(trailing_dot)
        return result_bytes, size
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    def _buffer_decode(self, data: Union[str, bytes], errors: str, final: bool) -> Tuple[str, int]:  # type: ignore
        if errors != 'strict':
            raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
        if not data:
            return ('', 0)
        if not isinstance(data, str):
            # Stream data arrives as ASCII bytes; decode before splitting
            data = str(data, 'ascii')
        labels = _unicode_dots_re.split(data)
trailing_dot = ''
if labels:
if not labels[-1]:
trailing_dot = '.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = '.'
result = []
size = 0
for label in labels:
result.append(ulabel(label))
if size:
size += 1
size += len(label)
result_str = '.'.join(result) + trailing_dot
size += len(trailing_dot)
return (result_str, size)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry() -> codecs.CodecInfo:
# Compatibility as a search_function for codecs.register()
return codecs.CodecInfo(
name='idna',
encode=Codec().encode, # type: ignore
decode=Codec().decode, # type: ignore
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
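# Illustrative usage sketch (added example, not part of the upstream module;
# run with `python -m idna.codec` because of the relative import): Codec can
# be used directly without registering it, and the second element of each
# returned tuple is the length of the consumed input.
if __name__ == '__main__':
    assert Codec().encode('\u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8') == (b'xn--eckwd4c7c.xn--zckzah', 8)
    assert Codec().decode(b'xn--eckwd4c7c.xn--zckzah') == ('\u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8', 24)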
| 3,374 | Python | 28.867256 | 101 | 0.553053 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna/package_data.py | __version__ = '3.4'
| 21 | Python | 6.333331 | 19 | 0.428571 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna/idnadata.py | # This file is automatically generated by tools/idna-data
__version__ = '15.0.0'
scripts = {
'Greek': (
0x37000000374,
0x37500000378,
0x37a0000037e,
0x37f00000380,
0x38400000385,
0x38600000387,
0x3880000038b,
0x38c0000038d,
0x38e000003a2,
0x3a3000003e2,
0x3f000000400,
0x1d2600001d2b,
0x1d5d00001d62,
0x1d6600001d6b,
0x1dbf00001dc0,
0x1f0000001f16,
0x1f1800001f1e,
0x1f2000001f46,
0x1f4800001f4e,
0x1f5000001f58,
0x1f5900001f5a,
0x1f5b00001f5c,
0x1f5d00001f5e,
0x1f5f00001f7e,
0x1f8000001fb5,
0x1fb600001fc5,
0x1fc600001fd4,
0x1fd600001fdc,
0x1fdd00001ff0,
0x1ff200001ff5,
0x1ff600001fff,
0x212600002127,
0xab650000ab66,
0x101400001018f,
0x101a0000101a1,
0x1d2000001d246,
),
'Han': (
0x2e8000002e9a,
0x2e9b00002ef4,
0x2f0000002fd6,
0x300500003006,
0x300700003008,
0x30210000302a,
0x30380000303c,
0x340000004dc0,
0x4e000000a000,
0xf9000000fa6e,
0xfa700000fada,
0x16fe200016fe4,
0x16ff000016ff2,
0x200000002a6e0,
0x2a7000002b73a,
0x2b7400002b81e,
0x2b8200002cea2,
0x2ceb00002ebe1,
0x2f8000002fa1e,
0x300000003134b,
0x31350000323b0,
),
'Hebrew': (
0x591000005c8,
0x5d0000005eb,
0x5ef000005f5,
0xfb1d0000fb37,
0xfb380000fb3d,
0xfb3e0000fb3f,
0xfb400000fb42,
0xfb430000fb45,
0xfb460000fb50,
),
'Hiragana': (
0x304100003097,
0x309d000030a0,
0x1b0010001b120,
0x1b1320001b133,
0x1b1500001b153,
0x1f2000001f201,
),
'Katakana': (
0x30a1000030fb,
0x30fd00003100,
0x31f000003200,
0x32d0000032ff,
0x330000003358,
0xff660000ff70,
0xff710000ff9e,
0x1aff00001aff4,
0x1aff50001affc,
0x1affd0001afff,
0x1b0000001b001,
0x1b1200001b123,
0x1b1550001b156,
0x1b1640001b168,
),
}
joining_types = {
0x600: 85,
0x601: 85,
0x602: 85,
0x603: 85,
0x604: 85,
0x605: 85,
0x608: 85,
0x60b: 85,
0x620: 68,
0x621: 85,
0x622: 82,
0x623: 82,
0x624: 82,
0x625: 82,
0x626: 68,
0x627: 82,
0x628: 68,
0x629: 82,
0x62a: 68,
0x62b: 68,
0x62c: 68,
0x62d: 68,
0x62e: 68,
0x62f: 82,
0x630: 82,
0x631: 82,
0x632: 82,
0x633: 68,
0x634: 68,
0x635: 68,
0x636: 68,
0x637: 68,
0x638: 68,
0x639: 68,
0x63a: 68,
0x63b: 68,
0x63c: 68,
0x63d: 68,
0x63e: 68,
0x63f: 68,
0x640: 67,
0x641: 68,
0x642: 68,
0x643: 68,
0x644: 68,
0x645: 68,
0x646: 68,
0x647: 68,
0x648: 82,
0x649: 68,
0x64a: 68,
0x66e: 68,
0x66f: 68,
0x671: 82,
0x672: 82,
0x673: 82,
0x674: 85,
0x675: 82,
0x676: 82,
0x677: 82,
0x678: 68,
0x679: 68,
0x67a: 68,
0x67b: 68,
0x67c: 68,
0x67d: 68,
0x67e: 68,
0x67f: 68,
0x680: 68,
0x681: 68,
0x682: 68,
0x683: 68,
0x684: 68,
0x685: 68,
0x686: 68,
0x687: 68,
0x688: 82,
0x689: 82,
0x68a: 82,
0x68b: 82,
0x68c: 82,
0x68d: 82,
0x68e: 82,
0x68f: 82,
0x690: 82,
0x691: 82,
0x692: 82,
0x693: 82,
0x694: 82,
0x695: 82,
0x696: 82,
0x697: 82,
0x698: 82,
0x699: 82,
0x69a: 68,
0x69b: 68,
0x69c: 68,
0x69d: 68,
0x69e: 68,
0x69f: 68,
0x6a0: 68,
0x6a1: 68,
0x6a2: 68,
0x6a3: 68,
0x6a4: 68,
0x6a5: 68,
0x6a6: 68,
0x6a7: 68,
0x6a8: 68,
0x6a9: 68,
0x6aa: 68,
0x6ab: 68,
0x6ac: 68,
0x6ad: 68,
0x6ae: 68,
0x6af: 68,
0x6b0: 68,
0x6b1: 68,
0x6b2: 68,
0x6b3: 68,
0x6b4: 68,
0x6b5: 68,
0x6b6: 68,
0x6b7: 68,
0x6b8: 68,
0x6b9: 68,
0x6ba: 68,
0x6bb: 68,
0x6bc: 68,
0x6bd: 68,
0x6be: 68,
0x6bf: 68,
0x6c0: 82,
0x6c1: 68,
0x6c2: 68,
0x6c3: 82,
0x6c4: 82,
0x6c5: 82,
0x6c6: 82,
0x6c7: 82,
0x6c8: 82,
0x6c9: 82,
0x6ca: 82,
0x6cb: 82,
0x6cc: 68,
0x6cd: 82,
0x6ce: 68,
0x6cf: 82,
0x6d0: 68,
0x6d1: 68,
0x6d2: 82,
0x6d3: 82,
0x6d5: 82,
0x6dd: 85,
0x6ee: 82,
0x6ef: 82,
0x6fa: 68,
0x6fb: 68,
0x6fc: 68,
0x6ff: 68,
0x70f: 84,
0x710: 82,
0x712: 68,
0x713: 68,
0x714: 68,
0x715: 82,
0x716: 82,
0x717: 82,
0x718: 82,
0x719: 82,
0x71a: 68,
0x71b: 68,
0x71c: 68,
0x71d: 68,
0x71e: 82,
0x71f: 68,
0x720: 68,
0x721: 68,
0x722: 68,
0x723: 68,
0x724: 68,
0x725: 68,
0x726: 68,
0x727: 68,
0x728: 82,
0x729: 68,
0x72a: 82,
0x72b: 68,
0x72c: 82,
0x72d: 68,
0x72e: 68,
0x72f: 82,
0x74d: 82,
0x74e: 68,
0x74f: 68,
0x750: 68,
0x751: 68,
0x752: 68,
0x753: 68,
0x754: 68,
0x755: 68,
0x756: 68,
0x757: 68,
0x758: 68,
0x759: 82,
0x75a: 82,
0x75b: 82,
0x75c: 68,
0x75d: 68,
0x75e: 68,
0x75f: 68,
0x760: 68,
0x761: 68,
0x762: 68,
0x763: 68,
0x764: 68,
0x765: 68,
0x766: 68,
0x767: 68,
0x768: 68,
0x769: 68,
0x76a: 68,
0x76b: 82,
0x76c: 82,
0x76d: 68,
0x76e: 68,
0x76f: 68,
0x770: 68,
0x771: 82,
0x772: 68,
0x773: 82,
0x774: 82,
0x775: 68,
0x776: 68,
0x777: 68,
0x778: 82,
0x779: 82,
0x77a: 68,
0x77b: 68,
0x77c: 68,
0x77d: 68,
0x77e: 68,
0x77f: 68,
0x7ca: 68,
0x7cb: 68,
0x7cc: 68,
0x7cd: 68,
0x7ce: 68,
0x7cf: 68,
0x7d0: 68,
0x7d1: 68,
0x7d2: 68,
0x7d3: 68,
0x7d4: 68,
0x7d5: 68,
0x7d6: 68,
0x7d7: 68,
0x7d8: 68,
0x7d9: 68,
0x7da: 68,
0x7db: 68,
0x7dc: 68,
0x7dd: 68,
0x7de: 68,
0x7df: 68,
0x7e0: 68,
0x7e1: 68,
0x7e2: 68,
0x7e3: 68,
0x7e4: 68,
0x7e5: 68,
0x7e6: 68,
0x7e7: 68,
0x7e8: 68,
0x7e9: 68,
0x7ea: 68,
0x7fa: 67,
0x840: 82,
0x841: 68,
0x842: 68,
0x843: 68,
0x844: 68,
0x845: 68,
0x846: 82,
0x847: 82,
0x848: 68,
0x849: 82,
0x84a: 68,
0x84b: 68,
0x84c: 68,
0x84d: 68,
0x84e: 68,
0x84f: 68,
0x850: 68,
0x851: 68,
0x852: 68,
0x853: 68,
0x854: 82,
0x855: 68,
0x856: 82,
0x857: 82,
0x858: 82,
0x860: 68,
0x861: 85,
0x862: 68,
0x863: 68,
0x864: 68,
0x865: 68,
0x866: 85,
0x867: 82,
0x868: 68,
0x869: 82,
0x86a: 82,
0x870: 82,
0x871: 82,
0x872: 82,
0x873: 82,
0x874: 82,
0x875: 82,
0x876: 82,
0x877: 82,
0x878: 82,
0x879: 82,
0x87a: 82,
0x87b: 82,
0x87c: 82,
0x87d: 82,
0x87e: 82,
0x87f: 82,
0x880: 82,
0x881: 82,
0x882: 82,
0x883: 67,
0x884: 67,
0x885: 67,
0x886: 68,
0x887: 85,
0x888: 85,
0x889: 68,
0x88a: 68,
0x88b: 68,
0x88c: 68,
0x88d: 68,
0x88e: 82,
0x890: 85,
0x891: 85,
0x8a0: 68,
0x8a1: 68,
0x8a2: 68,
0x8a3: 68,
0x8a4: 68,
0x8a5: 68,
0x8a6: 68,
0x8a7: 68,
0x8a8: 68,
0x8a9: 68,
0x8aa: 82,
0x8ab: 82,
0x8ac: 82,
0x8ad: 85,
0x8ae: 82,
0x8af: 68,
0x8b0: 68,
0x8b1: 82,
0x8b2: 82,
0x8b3: 68,
0x8b4: 68,
0x8b5: 68,
0x8b6: 68,
0x8b7: 68,
0x8b8: 68,
0x8b9: 82,
0x8ba: 68,
0x8bb: 68,
0x8bc: 68,
0x8bd: 68,
0x8be: 68,
0x8bf: 68,
0x8c0: 68,
0x8c1: 68,
0x8c2: 68,
0x8c3: 68,
0x8c4: 68,
0x8c5: 68,
0x8c6: 68,
0x8c7: 68,
0x8c8: 68,
0x8e2: 85,
0x1806: 85,
0x1807: 68,
0x180a: 67,
0x180e: 85,
0x1820: 68,
0x1821: 68,
0x1822: 68,
0x1823: 68,
0x1824: 68,
0x1825: 68,
0x1826: 68,
0x1827: 68,
0x1828: 68,
0x1829: 68,
0x182a: 68,
0x182b: 68,
0x182c: 68,
0x182d: 68,
0x182e: 68,
0x182f: 68,
0x1830: 68,
0x1831: 68,
0x1832: 68,
0x1833: 68,
0x1834: 68,
0x1835: 68,
0x1836: 68,
0x1837: 68,
0x1838: 68,
0x1839: 68,
0x183a: 68,
0x183b: 68,
0x183c: 68,
0x183d: 68,
0x183e: 68,
0x183f: 68,
0x1840: 68,
0x1841: 68,
0x1842: 68,
0x1843: 68,
0x1844: 68,
0x1845: 68,
0x1846: 68,
0x1847: 68,
0x1848: 68,
0x1849: 68,
0x184a: 68,
0x184b: 68,
0x184c: 68,
0x184d: 68,
0x184e: 68,
0x184f: 68,
0x1850: 68,
0x1851: 68,
0x1852: 68,
0x1853: 68,
0x1854: 68,
0x1855: 68,
0x1856: 68,
0x1857: 68,
0x1858: 68,
0x1859: 68,
0x185a: 68,
0x185b: 68,
0x185c: 68,
0x185d: 68,
0x185e: 68,
0x185f: 68,
0x1860: 68,
0x1861: 68,
0x1862: 68,
0x1863: 68,
0x1864: 68,
0x1865: 68,
0x1866: 68,
0x1867: 68,
0x1868: 68,
0x1869: 68,
0x186a: 68,
0x186b: 68,
0x186c: 68,
0x186d: 68,
0x186e: 68,
0x186f: 68,
0x1870: 68,
0x1871: 68,
0x1872: 68,
0x1873: 68,
0x1874: 68,
0x1875: 68,
0x1876: 68,
0x1877: 68,
0x1878: 68,
0x1880: 85,
0x1881: 85,
0x1882: 85,
0x1883: 85,
0x1884: 85,
0x1885: 84,
0x1886: 84,
0x1887: 68,
0x1888: 68,
0x1889: 68,
0x188a: 68,
0x188b: 68,
0x188c: 68,
0x188d: 68,
0x188e: 68,
0x188f: 68,
0x1890: 68,
0x1891: 68,
0x1892: 68,
0x1893: 68,
0x1894: 68,
0x1895: 68,
0x1896: 68,
0x1897: 68,
0x1898: 68,
0x1899: 68,
0x189a: 68,
0x189b: 68,
0x189c: 68,
0x189d: 68,
0x189e: 68,
0x189f: 68,
0x18a0: 68,
0x18a1: 68,
0x18a2: 68,
0x18a3: 68,
0x18a4: 68,
0x18a5: 68,
0x18a6: 68,
0x18a7: 68,
0x18a8: 68,
0x18aa: 68,
0x200c: 85,
0x200d: 67,
0x202f: 85,
0x2066: 85,
0x2067: 85,
0x2068: 85,
0x2069: 85,
0xa840: 68,
0xa841: 68,
0xa842: 68,
0xa843: 68,
0xa844: 68,
0xa845: 68,
0xa846: 68,
0xa847: 68,
0xa848: 68,
0xa849: 68,
0xa84a: 68,
0xa84b: 68,
0xa84c: 68,
0xa84d: 68,
0xa84e: 68,
0xa84f: 68,
0xa850: 68,
0xa851: 68,
0xa852: 68,
0xa853: 68,
0xa854: 68,
0xa855: 68,
0xa856: 68,
0xa857: 68,
0xa858: 68,
0xa859: 68,
0xa85a: 68,
0xa85b: 68,
0xa85c: 68,
0xa85d: 68,
0xa85e: 68,
0xa85f: 68,
0xa860: 68,
0xa861: 68,
0xa862: 68,
0xa863: 68,
0xa864: 68,
0xa865: 68,
0xa866: 68,
0xa867: 68,
0xa868: 68,
0xa869: 68,
0xa86a: 68,
0xa86b: 68,
0xa86c: 68,
0xa86d: 68,
0xa86e: 68,
0xa86f: 68,
0xa870: 68,
0xa871: 68,
0xa872: 76,
0xa873: 85,
0x10ac0: 68,
0x10ac1: 68,
0x10ac2: 68,
0x10ac3: 68,
0x10ac4: 68,
0x10ac5: 82,
0x10ac6: 85,
0x10ac7: 82,
0x10ac8: 85,
0x10ac9: 82,
0x10aca: 82,
0x10acb: 85,
0x10acc: 85,
0x10acd: 76,
0x10ace: 82,
0x10acf: 82,
0x10ad0: 82,
0x10ad1: 82,
0x10ad2: 82,
0x10ad3: 68,
0x10ad4: 68,
0x10ad5: 68,
0x10ad6: 68,
0x10ad7: 76,
0x10ad8: 68,
0x10ad9: 68,
0x10ada: 68,
0x10adb: 68,
0x10adc: 68,
0x10add: 82,
0x10ade: 68,
0x10adf: 68,
0x10ae0: 68,
0x10ae1: 82,
0x10ae2: 85,
0x10ae3: 85,
0x10ae4: 82,
0x10aeb: 68,
0x10aec: 68,
0x10aed: 68,
0x10aee: 68,
0x10aef: 82,
0x10b80: 68,
0x10b81: 82,
0x10b82: 68,
0x10b83: 82,
0x10b84: 82,
0x10b85: 82,
0x10b86: 68,
0x10b87: 68,
0x10b88: 68,
0x10b89: 82,
0x10b8a: 68,
0x10b8b: 68,
0x10b8c: 82,
0x10b8d: 68,
0x10b8e: 82,
0x10b8f: 82,
0x10b90: 68,
0x10b91: 82,
0x10ba9: 82,
0x10baa: 82,
0x10bab: 82,
0x10bac: 82,
0x10bad: 68,
0x10bae: 68,
0x10baf: 85,
0x10d00: 76,
0x10d01: 68,
0x10d02: 68,
0x10d03: 68,
0x10d04: 68,
0x10d05: 68,
0x10d06: 68,
0x10d07: 68,
0x10d08: 68,
0x10d09: 68,
0x10d0a: 68,
0x10d0b: 68,
0x10d0c: 68,
0x10d0d: 68,
0x10d0e: 68,
0x10d0f: 68,
0x10d10: 68,
0x10d11: 68,
0x10d12: 68,
0x10d13: 68,
0x10d14: 68,
0x10d15: 68,
0x10d16: 68,
0x10d17: 68,
0x10d18: 68,
0x10d19: 68,
0x10d1a: 68,
0x10d1b: 68,
0x10d1c: 68,
0x10d1d: 68,
0x10d1e: 68,
0x10d1f: 68,
0x10d20: 68,
0x10d21: 68,
0x10d22: 82,
0x10d23: 68,
0x10f30: 68,
0x10f31: 68,
0x10f32: 68,
0x10f33: 82,
0x10f34: 68,
0x10f35: 68,
0x10f36: 68,
0x10f37: 68,
0x10f38: 68,
0x10f39: 68,
0x10f3a: 68,
0x10f3b: 68,
0x10f3c: 68,
0x10f3d: 68,
0x10f3e: 68,
0x10f3f: 68,
0x10f40: 68,
0x10f41: 68,
0x10f42: 68,
0x10f43: 68,
0x10f44: 68,
0x10f45: 85,
0x10f51: 68,
0x10f52: 68,
0x10f53: 68,
0x10f54: 82,
0x10f70: 68,
0x10f71: 68,
0x10f72: 68,
0x10f73: 68,
0x10f74: 82,
0x10f75: 82,
0x10f76: 68,
0x10f77: 68,
0x10f78: 68,
0x10f79: 68,
0x10f7a: 68,
0x10f7b: 68,
0x10f7c: 68,
0x10f7d: 68,
0x10f7e: 68,
0x10f7f: 68,
0x10f80: 68,
0x10f81: 68,
0x10fb0: 68,
0x10fb1: 85,
0x10fb2: 68,
0x10fb3: 68,
0x10fb4: 82,
0x10fb5: 82,
0x10fb6: 82,
0x10fb7: 85,
0x10fb8: 68,
0x10fb9: 82,
0x10fba: 82,
0x10fbb: 68,
0x10fbc: 68,
0x10fbd: 82,
0x10fbe: 68,
0x10fbf: 68,
0x10fc0: 85,
0x10fc1: 68,
0x10fc2: 82,
0x10fc3: 82,
0x10fc4: 68,
0x10fc5: 85,
0x10fc6: 85,
0x10fc7: 85,
0x10fc8: 85,
0x10fc9: 82,
0x10fca: 68,
0x10fcb: 76,
0x110bd: 85,
0x110cd: 85,
0x1e900: 68,
0x1e901: 68,
0x1e902: 68,
0x1e903: 68,
0x1e904: 68,
0x1e905: 68,
0x1e906: 68,
0x1e907: 68,
0x1e908: 68,
0x1e909: 68,
0x1e90a: 68,
0x1e90b: 68,
0x1e90c: 68,
0x1e90d: 68,
0x1e90e: 68,
0x1e90f: 68,
0x1e910: 68,
0x1e911: 68,
0x1e912: 68,
0x1e913: 68,
0x1e914: 68,
0x1e915: 68,
0x1e916: 68,
0x1e917: 68,
0x1e918: 68,
0x1e919: 68,
0x1e91a: 68,
0x1e91b: 68,
0x1e91c: 68,
0x1e91d: 68,
0x1e91e: 68,
0x1e91f: 68,
0x1e920: 68,
0x1e921: 68,
0x1e922: 68,
0x1e923: 68,
0x1e924: 68,
0x1e925: 68,
0x1e926: 68,
0x1e927: 68,
0x1e928: 68,
0x1e929: 68,
0x1e92a: 68,
0x1e92b: 68,
0x1e92c: 68,
0x1e92d: 68,
0x1e92e: 68,
0x1e92f: 68,
0x1e930: 68,
0x1e931: 68,
0x1e932: 68,
0x1e933: 68,
0x1e934: 68,
0x1e935: 68,
0x1e936: 68,
0x1e937: 68,
0x1e938: 68,
0x1e939: 68,
0x1e93a: 68,
0x1e93b: 68,
0x1e93c: 68,
0x1e93d: 68,
0x1e93e: 68,
0x1e93f: 68,
0x1e940: 68,
0x1e941: 68,
0x1e942: 68,
0x1e943: 68,
0x1e94b: 84,
}
codepoint_classes = {
'PVALID': (
0x2d0000002e,
0x300000003a,
0x610000007b,
0xdf000000f7,
0xf800000100,
0x10100000102,
0x10300000104,
0x10500000106,
0x10700000108,
0x1090000010a,
0x10b0000010c,
0x10d0000010e,
0x10f00000110,
0x11100000112,
0x11300000114,
0x11500000116,
0x11700000118,
0x1190000011a,
0x11b0000011c,
0x11d0000011e,
0x11f00000120,
0x12100000122,
0x12300000124,
0x12500000126,
0x12700000128,
0x1290000012a,
0x12b0000012c,
0x12d0000012e,
0x12f00000130,
0x13100000132,
0x13500000136,
0x13700000139,
0x13a0000013b,
0x13c0000013d,
0x13e0000013f,
0x14200000143,
0x14400000145,
0x14600000147,
0x14800000149,
0x14b0000014c,
0x14d0000014e,
0x14f00000150,
0x15100000152,
0x15300000154,
0x15500000156,
0x15700000158,
0x1590000015a,
0x15b0000015c,
0x15d0000015e,
0x15f00000160,
0x16100000162,
0x16300000164,
0x16500000166,
0x16700000168,
0x1690000016a,
0x16b0000016c,
0x16d0000016e,
0x16f00000170,
0x17100000172,
0x17300000174,
0x17500000176,
0x17700000178,
0x17a0000017b,
0x17c0000017d,
0x17e0000017f,
0x18000000181,
0x18300000184,
0x18500000186,
0x18800000189,
0x18c0000018e,
0x19200000193,
0x19500000196,
0x1990000019c,
0x19e0000019f,
0x1a1000001a2,
0x1a3000001a4,
0x1a5000001a6,
0x1a8000001a9,
0x1aa000001ac,
0x1ad000001ae,
0x1b0000001b1,
0x1b4000001b5,
0x1b6000001b7,
0x1b9000001bc,
0x1bd000001c4,
0x1ce000001cf,
0x1d0000001d1,
0x1d2000001d3,
0x1d4000001d5,
0x1d6000001d7,
0x1d8000001d9,
0x1da000001db,
0x1dc000001de,
0x1df000001e0,
0x1e1000001e2,
0x1e3000001e4,
0x1e5000001e6,
0x1e7000001e8,
0x1e9000001ea,
0x1eb000001ec,
0x1ed000001ee,
0x1ef000001f1,
0x1f5000001f6,
0x1f9000001fa,
0x1fb000001fc,
0x1fd000001fe,
0x1ff00000200,
0x20100000202,
0x20300000204,
0x20500000206,
0x20700000208,
0x2090000020a,
0x20b0000020c,
0x20d0000020e,
0x20f00000210,
0x21100000212,
0x21300000214,
0x21500000216,
0x21700000218,
0x2190000021a,
0x21b0000021c,
0x21d0000021e,
0x21f00000220,
0x22100000222,
0x22300000224,
0x22500000226,
0x22700000228,
0x2290000022a,
0x22b0000022c,
0x22d0000022e,
0x22f00000230,
0x23100000232,
0x2330000023a,
0x23c0000023d,
0x23f00000241,
0x24200000243,
0x24700000248,
0x2490000024a,
0x24b0000024c,
0x24d0000024e,
0x24f000002b0,
0x2b9000002c2,
0x2c6000002d2,
0x2ec000002ed,
0x2ee000002ef,
0x30000000340,
0x34200000343,
0x3460000034f,
0x35000000370,
0x37100000372,
0x37300000374,
0x37700000378,
0x37b0000037e,
0x39000000391,
0x3ac000003cf,
0x3d7000003d8,
0x3d9000003da,
0x3db000003dc,
0x3dd000003de,
0x3df000003e0,
0x3e1000003e2,
0x3e3000003e4,
0x3e5000003e6,
0x3e7000003e8,
0x3e9000003ea,
0x3eb000003ec,
0x3ed000003ee,
0x3ef000003f0,
0x3f3000003f4,
0x3f8000003f9,
0x3fb000003fd,
0x43000000460,
0x46100000462,
0x46300000464,
0x46500000466,
0x46700000468,
0x4690000046a,
0x46b0000046c,
0x46d0000046e,
0x46f00000470,
0x47100000472,
0x47300000474,
0x47500000476,
0x47700000478,
0x4790000047a,
0x47b0000047c,
0x47d0000047e,
0x47f00000480,
0x48100000482,
0x48300000488,
0x48b0000048c,
0x48d0000048e,
0x48f00000490,
0x49100000492,
0x49300000494,
0x49500000496,
0x49700000498,
0x4990000049a,
0x49b0000049c,
0x49d0000049e,
0x49f000004a0,
0x4a1000004a2,
0x4a3000004a4,
0x4a5000004a6,
0x4a7000004a8,
0x4a9000004aa,
0x4ab000004ac,
0x4ad000004ae,
0x4af000004b0,
0x4b1000004b2,
0x4b3000004b4,
0x4b5000004b6,
0x4b7000004b8,
0x4b9000004ba,
0x4bb000004bc,
0x4bd000004be,
0x4bf000004c0,
0x4c2000004c3,
0x4c4000004c5,
0x4c6000004c7,
0x4c8000004c9,
0x4ca000004cb,
0x4cc000004cd,
0x4ce000004d0,
0x4d1000004d2,
0x4d3000004d4,
0x4d5000004d6,
0x4d7000004d8,
0x4d9000004da,
0x4db000004dc,
0x4dd000004de,
0x4df000004e0,
0x4e1000004e2,
0x4e3000004e4,
0x4e5000004e6,
0x4e7000004e8,
0x4e9000004ea,
0x4eb000004ec,
0x4ed000004ee,
0x4ef000004f0,
0x4f1000004f2,
0x4f3000004f4,
0x4f5000004f6,
0x4f7000004f8,
0x4f9000004fa,
0x4fb000004fc,
0x4fd000004fe,
0x4ff00000500,
0x50100000502,
0x50300000504,
0x50500000506,
0x50700000508,
0x5090000050a,
0x50b0000050c,
0x50d0000050e,
0x50f00000510,
0x51100000512,
0x51300000514,
0x51500000516,
0x51700000518,
0x5190000051a,
0x51b0000051c,
0x51d0000051e,
0x51f00000520,
0x52100000522,
0x52300000524,
0x52500000526,
0x52700000528,
0x5290000052a,
0x52b0000052c,
0x52d0000052e,
0x52f00000530,
0x5590000055a,
0x56000000587,
0x58800000589,
0x591000005be,
0x5bf000005c0,
0x5c1000005c3,
0x5c4000005c6,
0x5c7000005c8,
0x5d0000005eb,
0x5ef000005f3,
0x6100000061b,
0x62000000640,
0x64100000660,
0x66e00000675,
0x679000006d4,
0x6d5000006dd,
0x6df000006e9,
0x6ea000006f0,
0x6fa00000700,
0x7100000074b,
0x74d000007b2,
0x7c0000007f6,
0x7fd000007fe,
0x8000000082e,
0x8400000085c,
0x8600000086b,
0x87000000888,
0x8890000088f,
0x898000008e2,
0x8e300000958,
0x96000000964,
0x96600000970,
0x97100000984,
0x9850000098d,
0x98f00000991,
0x993000009a9,
0x9aa000009b1,
0x9b2000009b3,
0x9b6000009ba,
0x9bc000009c5,
0x9c7000009c9,
0x9cb000009cf,
0x9d7000009d8,
0x9e0000009e4,
0x9e6000009f2,
0x9fc000009fd,
0x9fe000009ff,
0xa0100000a04,
0xa0500000a0b,
0xa0f00000a11,
0xa1300000a29,
0xa2a00000a31,
0xa3200000a33,
0xa3500000a36,
0xa3800000a3a,
0xa3c00000a3d,
0xa3e00000a43,
0xa4700000a49,
0xa4b00000a4e,
0xa5100000a52,
0xa5c00000a5d,
0xa6600000a76,
0xa8100000a84,
0xa8500000a8e,
0xa8f00000a92,
0xa9300000aa9,
0xaaa00000ab1,
0xab200000ab4,
0xab500000aba,
0xabc00000ac6,
0xac700000aca,
0xacb00000ace,
0xad000000ad1,
0xae000000ae4,
0xae600000af0,
0xaf900000b00,
0xb0100000b04,
0xb0500000b0d,
0xb0f00000b11,
0xb1300000b29,
0xb2a00000b31,
0xb3200000b34,
0xb3500000b3a,
0xb3c00000b45,
0xb4700000b49,
0xb4b00000b4e,
0xb5500000b58,
0xb5f00000b64,
0xb6600000b70,
0xb7100000b72,
0xb8200000b84,
0xb8500000b8b,
0xb8e00000b91,
0xb9200000b96,
0xb9900000b9b,
0xb9c00000b9d,
0xb9e00000ba0,
0xba300000ba5,
0xba800000bab,
0xbae00000bba,
0xbbe00000bc3,
0xbc600000bc9,
0xbca00000bce,
0xbd000000bd1,
0xbd700000bd8,
0xbe600000bf0,
0xc0000000c0d,
0xc0e00000c11,
0xc1200000c29,
0xc2a00000c3a,
0xc3c00000c45,
0xc4600000c49,
0xc4a00000c4e,
0xc5500000c57,
0xc5800000c5b,
0xc5d00000c5e,
0xc6000000c64,
0xc6600000c70,
0xc8000000c84,
0xc8500000c8d,
0xc8e00000c91,
0xc9200000ca9,
0xcaa00000cb4,
0xcb500000cba,
0xcbc00000cc5,
0xcc600000cc9,
0xcca00000cce,
0xcd500000cd7,
0xcdd00000cdf,
0xce000000ce4,
0xce600000cf0,
0xcf100000cf4,
0xd0000000d0d,
0xd0e00000d11,
0xd1200000d45,
0xd4600000d49,
0xd4a00000d4f,
0xd5400000d58,
0xd5f00000d64,
0xd6600000d70,
0xd7a00000d80,
0xd8100000d84,
0xd8500000d97,
0xd9a00000db2,
0xdb300000dbc,
0xdbd00000dbe,
0xdc000000dc7,
0xdca00000dcb,
0xdcf00000dd5,
0xdd600000dd7,
0xdd800000de0,
0xde600000df0,
0xdf200000df4,
0xe0100000e33,
0xe3400000e3b,
0xe4000000e4f,
0xe5000000e5a,
0xe8100000e83,
0xe8400000e85,
0xe8600000e8b,
0xe8c00000ea4,
0xea500000ea6,
0xea700000eb3,
0xeb400000ebe,
0xec000000ec5,
0xec600000ec7,
0xec800000ecf,
0xed000000eda,
0xede00000ee0,
0xf0000000f01,
0xf0b00000f0c,
0xf1800000f1a,
0xf2000000f2a,
0xf3500000f36,
0xf3700000f38,
0xf3900000f3a,
0xf3e00000f43,
0xf4400000f48,
0xf4900000f4d,
0xf4e00000f52,
0xf5300000f57,
0xf5800000f5c,
0xf5d00000f69,
0xf6a00000f6d,
0xf7100000f73,
0xf7400000f75,
0xf7a00000f81,
0xf8200000f85,
0xf8600000f93,
0xf9400000f98,
0xf9900000f9d,
0xf9e00000fa2,
0xfa300000fa7,
0xfa800000fac,
0xfad00000fb9,
0xfba00000fbd,
0xfc600000fc7,
0x10000000104a,
0x10500000109e,
0x10d0000010fb,
0x10fd00001100,
0x120000001249,
0x124a0000124e,
0x125000001257,
0x125800001259,
0x125a0000125e,
0x126000001289,
0x128a0000128e,
0x1290000012b1,
0x12b2000012b6,
0x12b8000012bf,
0x12c0000012c1,
0x12c2000012c6,
0x12c8000012d7,
0x12d800001311,
0x131200001316,
0x13180000135b,
0x135d00001360,
0x138000001390,
0x13a0000013f6,
0x14010000166d,
0x166f00001680,
0x16810000169b,
0x16a0000016eb,
0x16f1000016f9,
0x170000001716,
0x171f00001735,
0x174000001754,
0x17600000176d,
0x176e00001771,
0x177200001774,
0x1780000017b4,
0x17b6000017d4,
0x17d7000017d8,
0x17dc000017de,
0x17e0000017ea,
0x18100000181a,
0x182000001879,
0x1880000018ab,
0x18b0000018f6,
0x19000000191f,
0x19200000192c,
0x19300000193c,
0x19460000196e,
0x197000001975,
0x1980000019ac,
0x19b0000019ca,
0x19d0000019da,
0x1a0000001a1c,
0x1a2000001a5f,
0x1a6000001a7d,
0x1a7f00001a8a,
0x1a9000001a9a,
0x1aa700001aa8,
0x1ab000001abe,
0x1abf00001acf,
0x1b0000001b4d,
0x1b5000001b5a,
0x1b6b00001b74,
0x1b8000001bf4,
0x1c0000001c38,
0x1c4000001c4a,
0x1c4d00001c7e,
0x1cd000001cd3,
0x1cd400001cfb,
0x1d0000001d2c,
0x1d2f00001d30,
0x1d3b00001d3c,
0x1d4e00001d4f,
0x1d6b00001d78,
0x1d7900001d9b,
0x1dc000001e00,
0x1e0100001e02,
0x1e0300001e04,
0x1e0500001e06,
0x1e0700001e08,
0x1e0900001e0a,
0x1e0b00001e0c,
0x1e0d00001e0e,
0x1e0f00001e10,
0x1e1100001e12,
0x1e1300001e14,
0x1e1500001e16,
0x1e1700001e18,
0x1e1900001e1a,
0x1e1b00001e1c,
0x1e1d00001e1e,
0x1e1f00001e20,
0x1e2100001e22,
0x1e2300001e24,
0x1e2500001e26,
0x1e2700001e28,
0x1e2900001e2a,
0x1e2b00001e2c,
0x1e2d00001e2e,
0x1e2f00001e30,
0x1e3100001e32,
0x1e3300001e34,
0x1e3500001e36,
0x1e3700001e38,
0x1e3900001e3a,
0x1e3b00001e3c,
0x1e3d00001e3e,
0x1e3f00001e40,
0x1e4100001e42,
0x1e4300001e44,
0x1e4500001e46,
0x1e4700001e48,
0x1e4900001e4a,
0x1e4b00001e4c,
0x1e4d00001e4e,
0x1e4f00001e50,
0x1e5100001e52,
0x1e5300001e54,
0x1e5500001e56,
0x1e5700001e58,
0x1e5900001e5a,
0x1e5b00001e5c,
0x1e5d00001e5e,
0x1e5f00001e60,
0x1e6100001e62,
0x1e6300001e64,
0x1e6500001e66,
0x1e6700001e68,
0x1e6900001e6a,
0x1e6b00001e6c,
0x1e6d00001e6e,
0x1e6f00001e70,
0x1e7100001e72,
0x1e7300001e74,
0x1e7500001e76,
0x1e7700001e78,
0x1e7900001e7a,
0x1e7b00001e7c,
0x1e7d00001e7e,
0x1e7f00001e80,
0x1e8100001e82,
0x1e8300001e84,
0x1e8500001e86,
0x1e8700001e88,
0x1e8900001e8a,
0x1e8b00001e8c,
0x1e8d00001e8e,
0x1e8f00001e90,
0x1e9100001e92,
0x1e9300001e94,
0x1e9500001e9a,
0x1e9c00001e9e,
0x1e9f00001ea0,
0x1ea100001ea2,
0x1ea300001ea4,
0x1ea500001ea6,
0x1ea700001ea8,
0x1ea900001eaa,
0x1eab00001eac,
0x1ead00001eae,
0x1eaf00001eb0,
0x1eb100001eb2,
0x1eb300001eb4,
0x1eb500001eb6,
0x1eb700001eb8,
0x1eb900001eba,
0x1ebb00001ebc,
0x1ebd00001ebe,
0x1ebf00001ec0,
0x1ec100001ec2,
0x1ec300001ec4,
0x1ec500001ec6,
0x1ec700001ec8,
0x1ec900001eca,
0x1ecb00001ecc,
0x1ecd00001ece,
0x1ecf00001ed0,
0x1ed100001ed2,
0x1ed300001ed4,
0x1ed500001ed6,
0x1ed700001ed8,
0x1ed900001eda,
0x1edb00001edc,
0x1edd00001ede,
0x1edf00001ee0,
0x1ee100001ee2,
0x1ee300001ee4,
0x1ee500001ee6,
0x1ee700001ee8,
0x1ee900001eea,
0x1eeb00001eec,
0x1eed00001eee,
0x1eef00001ef0,
0x1ef100001ef2,
0x1ef300001ef4,
0x1ef500001ef6,
0x1ef700001ef8,
0x1ef900001efa,
0x1efb00001efc,
0x1efd00001efe,
0x1eff00001f08,
0x1f1000001f16,
0x1f2000001f28,
0x1f3000001f38,
0x1f4000001f46,
0x1f5000001f58,
0x1f6000001f68,
0x1f7000001f71,
0x1f7200001f73,
0x1f7400001f75,
0x1f7600001f77,
0x1f7800001f79,
0x1f7a00001f7b,
0x1f7c00001f7d,
0x1fb000001fb2,
0x1fb600001fb7,
0x1fc600001fc7,
0x1fd000001fd3,
0x1fd600001fd8,
0x1fe000001fe3,
0x1fe400001fe8,
0x1ff600001ff7,
0x214e0000214f,
0x218400002185,
0x2c3000002c60,
0x2c6100002c62,
0x2c6500002c67,
0x2c6800002c69,
0x2c6a00002c6b,
0x2c6c00002c6d,
0x2c7100002c72,
0x2c7300002c75,
0x2c7600002c7c,
0x2c8100002c82,
0x2c8300002c84,
0x2c8500002c86,
0x2c8700002c88,
0x2c8900002c8a,
0x2c8b00002c8c,
0x2c8d00002c8e,
0x2c8f00002c90,
0x2c9100002c92,
0x2c9300002c94,
0x2c9500002c96,
0x2c9700002c98,
0x2c9900002c9a,
0x2c9b00002c9c,
0x2c9d00002c9e,
0x2c9f00002ca0,
0x2ca100002ca2,
0x2ca300002ca4,
0x2ca500002ca6,
0x2ca700002ca8,
0x2ca900002caa,
0x2cab00002cac,
0x2cad00002cae,
0x2caf00002cb0,
0x2cb100002cb2,
0x2cb300002cb4,
0x2cb500002cb6,
0x2cb700002cb8,
0x2cb900002cba,
0x2cbb00002cbc,
0x2cbd00002cbe,
0x2cbf00002cc0,
0x2cc100002cc2,
0x2cc300002cc4,
0x2cc500002cc6,
0x2cc700002cc8,
0x2cc900002cca,
0x2ccb00002ccc,
0x2ccd00002cce,
0x2ccf00002cd0,
0x2cd100002cd2,
0x2cd300002cd4,
0x2cd500002cd6,
0x2cd700002cd8,
0x2cd900002cda,
0x2cdb00002cdc,
0x2cdd00002cde,
0x2cdf00002ce0,
0x2ce100002ce2,
0x2ce300002ce5,
0x2cec00002ced,
0x2cee00002cf2,
0x2cf300002cf4,
0x2d0000002d26,
0x2d2700002d28,
0x2d2d00002d2e,
0x2d3000002d68,
0x2d7f00002d97,
0x2da000002da7,
0x2da800002daf,
0x2db000002db7,
0x2db800002dbf,
0x2dc000002dc7,
0x2dc800002dcf,
0x2dd000002dd7,
0x2dd800002ddf,
0x2de000002e00,
0x2e2f00002e30,
0x300500003008,
0x302a0000302e,
0x303c0000303d,
0x304100003097,
0x30990000309b,
0x309d0000309f,
0x30a1000030fb,
0x30fc000030ff,
0x310500003130,
0x31a0000031c0,
0x31f000003200,
0x340000004dc0,
0x4e000000a48d,
0xa4d00000a4fe,
0xa5000000a60d,
0xa6100000a62c,
0xa6410000a642,
0xa6430000a644,
0xa6450000a646,
0xa6470000a648,
0xa6490000a64a,
0xa64b0000a64c,
0xa64d0000a64e,
0xa64f0000a650,
0xa6510000a652,
0xa6530000a654,
0xa6550000a656,
0xa6570000a658,
0xa6590000a65a,
0xa65b0000a65c,
0xa65d0000a65e,
0xa65f0000a660,
0xa6610000a662,
0xa6630000a664,
0xa6650000a666,
0xa6670000a668,
0xa6690000a66a,
0xa66b0000a66c,
0xa66d0000a670,
0xa6740000a67e,
0xa67f0000a680,
0xa6810000a682,
0xa6830000a684,
0xa6850000a686,
0xa6870000a688,
0xa6890000a68a,
0xa68b0000a68c,
0xa68d0000a68e,
0xa68f0000a690,
0xa6910000a692,
0xa6930000a694,
0xa6950000a696,
0xa6970000a698,
0xa6990000a69a,
0xa69b0000a69c,
0xa69e0000a6e6,
0xa6f00000a6f2,
0xa7170000a720,
0xa7230000a724,
0xa7250000a726,
0xa7270000a728,
0xa7290000a72a,
0xa72b0000a72c,
0xa72d0000a72e,
0xa72f0000a732,
0xa7330000a734,
0xa7350000a736,
0xa7370000a738,
0xa7390000a73a,
0xa73b0000a73c,
0xa73d0000a73e,
0xa73f0000a740,
0xa7410000a742,
0xa7430000a744,
0xa7450000a746,
0xa7470000a748,
0xa7490000a74a,
0xa74b0000a74c,
0xa74d0000a74e,
0xa74f0000a750,
0xa7510000a752,
0xa7530000a754,
0xa7550000a756,
0xa7570000a758,
0xa7590000a75a,
0xa75b0000a75c,
0xa75d0000a75e,
0xa75f0000a760,
0xa7610000a762,
0xa7630000a764,
0xa7650000a766,
0xa7670000a768,
0xa7690000a76a,
0xa76b0000a76c,
0xa76d0000a76e,
0xa76f0000a770,
0xa7710000a779,
0xa77a0000a77b,
0xa77c0000a77d,
0xa77f0000a780,
0xa7810000a782,
0xa7830000a784,
0xa7850000a786,
0xa7870000a789,
0xa78c0000a78d,
0xa78e0000a790,
0xa7910000a792,
0xa7930000a796,
0xa7970000a798,
0xa7990000a79a,
0xa79b0000a79c,
0xa79d0000a79e,
0xa79f0000a7a0,
0xa7a10000a7a2,
0xa7a30000a7a4,
0xa7a50000a7a6,
0xa7a70000a7a8,
0xa7a90000a7aa,
0xa7af0000a7b0,
0xa7b50000a7b6,
0xa7b70000a7b8,
0xa7b90000a7ba,
0xa7bb0000a7bc,
0xa7bd0000a7be,
0xa7bf0000a7c0,
0xa7c10000a7c2,
0xa7c30000a7c4,
0xa7c80000a7c9,
0xa7ca0000a7cb,
0xa7d10000a7d2,
0xa7d30000a7d4,
0xa7d50000a7d6,
0xa7d70000a7d8,
0xa7d90000a7da,
0xa7f20000a7f5,
0xa7f60000a7f8,
0xa7fa0000a828,
0xa82c0000a82d,
0xa8400000a874,
0xa8800000a8c6,
0xa8d00000a8da,
0xa8e00000a8f8,
0xa8fb0000a8fc,
0xa8fd0000a92e,
0xa9300000a954,
0xa9800000a9c1,
0xa9cf0000a9da,
0xa9e00000a9ff,
0xaa000000aa37,
0xaa400000aa4e,
0xaa500000aa5a,
0xaa600000aa77,
0xaa7a0000aac3,
0xaadb0000aade,
0xaae00000aaf0,
0xaaf20000aaf7,
0xab010000ab07,
0xab090000ab0f,
0xab110000ab17,
0xab200000ab27,
0xab280000ab2f,
0xab300000ab5b,
0xab600000ab69,
0xabc00000abeb,
0xabec0000abee,
0xabf00000abfa,
0xac000000d7a4,
0xfa0e0000fa10,
0xfa110000fa12,
0xfa130000fa15,
0xfa1f0000fa20,
0xfa210000fa22,
0xfa230000fa25,
0xfa270000fa2a,
0xfb1e0000fb1f,
0xfe200000fe30,
0xfe730000fe74,
0x100000001000c,
0x1000d00010027,
0x100280001003b,
0x1003c0001003e,
0x1003f0001004e,
0x100500001005e,
0x10080000100fb,
0x101fd000101fe,
0x102800001029d,
0x102a0000102d1,
0x102e0000102e1,
0x1030000010320,
0x1032d00010341,
0x103420001034a,
0x103500001037b,
0x103800001039e,
0x103a0000103c4,
0x103c8000103d0,
0x104280001049e,
0x104a0000104aa,
0x104d8000104fc,
0x1050000010528,
0x1053000010564,
0x10597000105a2,
0x105a3000105b2,
0x105b3000105ba,
0x105bb000105bd,
0x1060000010737,
0x1074000010756,
0x1076000010768,
0x1078000010786,
0x10787000107b1,
0x107b2000107bb,
0x1080000010806,
0x1080800010809,
0x1080a00010836,
0x1083700010839,
0x1083c0001083d,
0x1083f00010856,
0x1086000010877,
0x108800001089f,
0x108e0000108f3,
0x108f4000108f6,
0x1090000010916,
0x109200001093a,
0x10980000109b8,
0x109be000109c0,
0x10a0000010a04,
0x10a0500010a07,
0x10a0c00010a14,
0x10a1500010a18,
0x10a1900010a36,
0x10a3800010a3b,
0x10a3f00010a40,
0x10a6000010a7d,
0x10a8000010a9d,
0x10ac000010ac8,
0x10ac900010ae7,
0x10b0000010b36,
0x10b4000010b56,
0x10b6000010b73,
0x10b8000010b92,
0x10c0000010c49,
0x10cc000010cf3,
0x10d0000010d28,
0x10d3000010d3a,
0x10e8000010eaa,
0x10eab00010ead,
0x10eb000010eb2,
0x10efd00010f1d,
0x10f2700010f28,
0x10f3000010f51,
0x10f7000010f86,
0x10fb000010fc5,
0x10fe000010ff7,
0x1100000011047,
0x1106600011076,
0x1107f000110bb,
0x110c2000110c3,
0x110d0000110e9,
0x110f0000110fa,
0x1110000011135,
0x1113600011140,
0x1114400011148,
0x1115000011174,
0x1117600011177,
0x11180000111c5,
0x111c9000111cd,
0x111ce000111db,
0x111dc000111dd,
0x1120000011212,
0x1121300011238,
0x1123e00011242,
0x1128000011287,
0x1128800011289,
0x1128a0001128e,
0x1128f0001129e,
0x1129f000112a9,
0x112b0000112eb,
0x112f0000112fa,
0x1130000011304,
0x113050001130d,
0x1130f00011311,
0x1131300011329,
0x1132a00011331,
0x1133200011334,
0x113350001133a,
0x1133b00011345,
0x1134700011349,
0x1134b0001134e,
0x1135000011351,
0x1135700011358,
0x1135d00011364,
0x113660001136d,
0x1137000011375,
0x114000001144b,
0x114500001145a,
0x1145e00011462,
0x11480000114c6,
0x114c7000114c8,
0x114d0000114da,
0x11580000115b6,
0x115b8000115c1,
0x115d8000115de,
0x1160000011641,
0x1164400011645,
0x116500001165a,
0x11680000116b9,
0x116c0000116ca,
0x117000001171b,
0x1171d0001172c,
0x117300001173a,
0x1174000011747,
0x118000001183b,
0x118c0000118ea,
0x118ff00011907,
0x119090001190a,
0x1190c00011914,
0x1191500011917,
0x1191800011936,
0x1193700011939,
0x1193b00011944,
0x119500001195a,
0x119a0000119a8,
0x119aa000119d8,
0x119da000119e2,
0x119e3000119e5,
0x11a0000011a3f,
0x11a4700011a48,
0x11a5000011a9a,
0x11a9d00011a9e,
0x11ab000011af9,
0x11c0000011c09,
0x11c0a00011c37,
0x11c3800011c41,
0x11c5000011c5a,
0x11c7200011c90,
0x11c9200011ca8,
0x11ca900011cb7,
0x11d0000011d07,
0x11d0800011d0a,
0x11d0b00011d37,
0x11d3a00011d3b,
0x11d3c00011d3e,
0x11d3f00011d48,
0x11d5000011d5a,
0x11d6000011d66,
0x11d6700011d69,
0x11d6a00011d8f,
0x11d9000011d92,
0x11d9300011d99,
0x11da000011daa,
0x11ee000011ef7,
0x11f0000011f11,
0x11f1200011f3b,
0x11f3e00011f43,
0x11f5000011f5a,
0x11fb000011fb1,
0x120000001239a,
0x1248000012544,
0x12f9000012ff1,
0x1300000013430,
0x1344000013456,
0x1440000014647,
0x1680000016a39,
0x16a4000016a5f,
0x16a6000016a6a,
0x16a7000016abf,
0x16ac000016aca,
0x16ad000016aee,
0x16af000016af5,
0x16b0000016b37,
0x16b4000016b44,
0x16b5000016b5a,
0x16b6300016b78,
0x16b7d00016b90,
0x16e6000016e80,
0x16f0000016f4b,
0x16f4f00016f88,
0x16f8f00016fa0,
0x16fe000016fe2,
0x16fe300016fe5,
0x16ff000016ff2,
0x17000000187f8,
0x1880000018cd6,
0x18d0000018d09,
0x1aff00001aff4,
0x1aff50001affc,
0x1affd0001afff,
0x1b0000001b123,
0x1b1320001b133,
0x1b1500001b153,
0x1b1550001b156,
0x1b1640001b168,
0x1b1700001b2fc,
0x1bc000001bc6b,
0x1bc700001bc7d,
0x1bc800001bc89,
0x1bc900001bc9a,
0x1bc9d0001bc9f,
0x1cf000001cf2e,
0x1cf300001cf47,
0x1da000001da37,
0x1da3b0001da6d,
0x1da750001da76,
0x1da840001da85,
0x1da9b0001daa0,
0x1daa10001dab0,
0x1df000001df1f,
0x1df250001df2b,
0x1e0000001e007,
0x1e0080001e019,
0x1e01b0001e022,
0x1e0230001e025,
0x1e0260001e02b,
0x1e0300001e06e,
0x1e08f0001e090,
0x1e1000001e12d,
0x1e1300001e13e,
0x1e1400001e14a,
0x1e14e0001e14f,
0x1e2900001e2af,
0x1e2c00001e2fa,
0x1e4d00001e4fa,
0x1e7e00001e7e7,
0x1e7e80001e7ec,
0x1e7ed0001e7ef,
0x1e7f00001e7ff,
0x1e8000001e8c5,
0x1e8d00001e8d7,
0x1e9220001e94c,
0x1e9500001e95a,
0x200000002a6e0,
0x2a7000002b73a,
0x2b7400002b81e,
0x2b8200002cea2,
0x2ceb00002ebe1,
0x300000003134b,
0x31350000323b0,
),
'CONTEXTJ': (
0x200c0000200e,
),
'CONTEXTO': (
0xb7000000b8,
0x37500000376,
0x5f3000005f5,
0x6600000066a,
0x6f0000006fa,
0x30fb000030fc,
),
}
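# The packed values in the tables above encode half-open codepoint ranges as
# single integers: (start << 32) | end. A minimal sketch of how such values
# can be decoded and queried (it mirrors the helpers in idna/intranges.py;
# the function names and `sample_ranges` excerpt below are illustrative, not
# part of the generated module):
#
# import bisect
#
# def _decode_range(r: int):
#     # Unpack (start << 32) | end back into its two halves.
#     return r >> 32, r & ((1 << 32) - 1)
#
# def _contains(cp: int, ranges):
#     # `ranges` is a sorted tuple of packed values; bisecting on (cp << 32)
#     # lands next to the only range that could contain `cp`.
#     pos = bisect.bisect_left(ranges, cp << 32)
#     if pos > 0:
#         start, end = _decode_range(ranges[pos - 1])
#         if start <= cp < end:
#             return True
#     if pos < len(ranges) and _decode_range(ranges[pos])[0] == cp:
#         return True
#     return False
#
# sample_ranges = (0x2C6100002C62, 0x2C6500002C67)  # hypothetical excerpt
# assert _contains(0x2C61, sample_ranges)
# assert not _contains(0x2C62, sample_ranges)  # end bound is exclusive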
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna/uts46data.py
# This file is automatically generated by tools/idna-data
# vim: set fileencoding=utf-8 :
from typing import List, Tuple, Union
"""IDNA Mapping Table from UTS46."""
__version__ = '15.0.0'
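# Each _seg_N() below returns one slice of a single large, sorted table whose
# rows are (codepoint, status) or (codepoint, status, mapping); a row applies
# to every codepoint up to, but not including, the next row's codepoint.
# Status codes: 'V' valid, 'M' mapped, 'D' deviation, 'I' ignored,
# 'X' disallowed, '3' disallowed under STD3 rules (valid or mapped otherwise).
# The table is split across many small functions, presumably to keep each
# literal within compiler-friendly limits. A minimal lookup sketch, assuming
# the concatenated `uts46data` tuple this module builds at the end (the
# `_lookup` helper name is hypothetical):
#
# import bisect
#
# def _lookup(table, cp: int):
#     # Bisecting on (cp, 'Z') finds the last row whose codepoint is <= cp,
#     # since every status letter sorts below 'Z'.
#     return table[bisect.bisect_left(table, (cp, 'Z')) - 1]
#
# # e.g. _lookup(uts46data, 0x41) -> (0x41, 'M', 'a'): 'A' is mapped to 'a'.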
def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x0, '3'),
(0x1, '3'),
(0x2, '3'),
(0x3, '3'),
(0x4, '3'),
(0x5, '3'),
(0x6, '3'),
(0x7, '3'),
(0x8, '3'),
(0x9, '3'),
(0xA, '3'),
(0xB, '3'),
(0xC, '3'),
(0xD, '3'),
(0xE, '3'),
(0xF, '3'),
(0x10, '3'),
(0x11, '3'),
(0x12, '3'),
(0x13, '3'),
(0x14, '3'),
(0x15, '3'),
(0x16, '3'),
(0x17, '3'),
(0x18, '3'),
(0x19, '3'),
(0x1A, '3'),
(0x1B, '3'),
(0x1C, '3'),
(0x1D, '3'),
(0x1E, '3'),
(0x1F, '3'),
(0x20, '3'),
(0x21, '3'),
(0x22, '3'),
(0x23, '3'),
(0x24, '3'),
(0x25, '3'),
(0x26, '3'),
(0x27, '3'),
(0x28, '3'),
(0x29, '3'),
(0x2A, '3'),
(0x2B, '3'),
(0x2C, '3'),
(0x2D, 'V'),
(0x2E, 'V'),
(0x2F, '3'),
(0x30, 'V'),
(0x31, 'V'),
(0x32, 'V'),
(0x33, 'V'),
(0x34, 'V'),
(0x35, 'V'),
(0x36, 'V'),
(0x37, 'V'),
(0x38, 'V'),
(0x39, 'V'),
(0x3A, '3'),
(0x3B, '3'),
(0x3C, '3'),
(0x3D, '3'),
(0x3E, '3'),
(0x3F, '3'),
(0x40, '3'),
(0x41, 'M', 'a'),
(0x42, 'M', 'b'),
(0x43, 'M', 'c'),
(0x44, 'M', 'd'),
(0x45, 'M', 'e'),
(0x46, 'M', 'f'),
(0x47, 'M', 'g'),
(0x48, 'M', 'h'),
(0x49, 'M', 'i'),
(0x4A, 'M', 'j'),
(0x4B, 'M', 'k'),
(0x4C, 'M', 'l'),
(0x4D, 'M', 'm'),
(0x4E, 'M', 'n'),
(0x4F, 'M', 'o'),
(0x50, 'M', 'p'),
(0x51, 'M', 'q'),
(0x52, 'M', 'r'),
(0x53, 'M', 's'),
(0x54, 'M', 't'),
(0x55, 'M', 'u'),
(0x56, 'M', 'v'),
(0x57, 'M', 'w'),
(0x58, 'M', 'x'),
(0x59, 'M', 'y'),
(0x5A, 'M', 'z'),
(0x5B, '3'),
(0x5C, '3'),
(0x5D, '3'),
(0x5E, '3'),
(0x5F, '3'),
(0x60, '3'),
(0x61, 'V'),
(0x62, 'V'),
(0x63, 'V'),
]
def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x64, 'V'),
(0x65, 'V'),
(0x66, 'V'),
(0x67, 'V'),
(0x68, 'V'),
(0x69, 'V'),
(0x6A, 'V'),
(0x6B, 'V'),
(0x6C, 'V'),
(0x6D, 'V'),
(0x6E, 'V'),
(0x6F, 'V'),
(0x70, 'V'),
(0x71, 'V'),
(0x72, 'V'),
(0x73, 'V'),
(0x74, 'V'),
(0x75, 'V'),
(0x76, 'V'),
(0x77, 'V'),
(0x78, 'V'),
(0x79, 'V'),
(0x7A, 'V'),
(0x7B, '3'),
(0x7C, '3'),
(0x7D, '3'),
(0x7E, '3'),
(0x7F, '3'),
(0x80, 'X'),
(0x81, 'X'),
(0x82, 'X'),
(0x83, 'X'),
(0x84, 'X'),
(0x85, 'X'),
(0x86, 'X'),
(0x87, 'X'),
(0x88, 'X'),
(0x89, 'X'),
(0x8A, 'X'),
(0x8B, 'X'),
(0x8C, 'X'),
(0x8D, 'X'),
(0x8E, 'X'),
(0x8F, 'X'),
(0x90, 'X'),
(0x91, 'X'),
(0x92, 'X'),
(0x93, 'X'),
(0x94, 'X'),
(0x95, 'X'),
(0x96, 'X'),
(0x97, 'X'),
(0x98, 'X'),
(0x99, 'X'),
(0x9A, 'X'),
(0x9B, 'X'),
(0x9C, 'X'),
(0x9D, 'X'),
(0x9E, 'X'),
(0x9F, 'X'),
(0xA0, '3', ' '),
(0xA1, 'V'),
(0xA2, 'V'),
(0xA3, 'V'),
(0xA4, 'V'),
(0xA5, 'V'),
(0xA6, 'V'),
(0xA7, 'V'),
(0xA8, '3', ' ̈'),
(0xA9, 'V'),
(0xAA, 'M', 'a'),
(0xAB, 'V'),
(0xAC, 'V'),
(0xAD, 'I'),
(0xAE, 'V'),
(0xAF, '3', ' ̄'),
(0xB0, 'V'),
(0xB1, 'V'),
(0xB2, 'M', '2'),
(0xB3, 'M', '3'),
(0xB4, '3', ' ́'),
(0xB5, 'M', 'μ'),
(0xB6, 'V'),
(0xB7, 'V'),
(0xB8, '3', ' ̧'),
(0xB9, 'M', '1'),
(0xBA, 'M', 'o'),
(0xBB, 'V'),
(0xBC, 'M', '1⁄4'),
(0xBD, 'M', '1⁄2'),
(0xBE, 'M', '3⁄4'),
(0xBF, 'V'),
(0xC0, 'M', 'à'),
(0xC1, 'M', 'á'),
(0xC2, 'M', 'â'),
(0xC3, 'M', 'ã'),
(0xC4, 'M', 'ä'),
(0xC5, 'M', 'å'),
(0xC6, 'M', 'æ'),
(0xC7, 'M', 'ç'),
]
def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xC8, 'M', 'è'),
(0xC9, 'M', 'é'),
(0xCA, 'M', 'ê'),
(0xCB, 'M', 'ë'),
(0xCC, 'M', 'ì'),
(0xCD, 'M', 'í'),
(0xCE, 'M', 'î'),
(0xCF, 'M', 'ï'),
(0xD0, 'M', 'ð'),
(0xD1, 'M', 'ñ'),
(0xD2, 'M', 'ò'),
(0xD3, 'M', 'ó'),
(0xD4, 'M', 'ô'),
(0xD5, 'M', 'õ'),
(0xD6, 'M', 'ö'),
(0xD7, 'V'),
(0xD8, 'M', 'ø'),
(0xD9, 'M', 'ù'),
(0xDA, 'M', 'ú'),
(0xDB, 'M', 'û'),
(0xDC, 'M', 'ü'),
(0xDD, 'M', 'ý'),
(0xDE, 'M', 'þ'),
(0xDF, 'D', 'ss'),
(0xE0, 'V'),
(0xE1, 'V'),
(0xE2, 'V'),
(0xE3, 'V'),
(0xE4, 'V'),
(0xE5, 'V'),
(0xE6, 'V'),
(0xE7, 'V'),
(0xE8, 'V'),
(0xE9, 'V'),
(0xEA, 'V'),
(0xEB, 'V'),
(0xEC, 'V'),
(0xED, 'V'),
(0xEE, 'V'),
(0xEF, 'V'),
(0xF0, 'V'),
(0xF1, 'V'),
(0xF2, 'V'),
(0xF3, 'V'),
(0xF4, 'V'),
(0xF5, 'V'),
(0xF6, 'V'),
(0xF7, 'V'),
(0xF8, 'V'),
(0xF9, 'V'),
(0xFA, 'V'),
(0xFB, 'V'),
(0xFC, 'V'),
(0xFD, 'V'),
(0xFE, 'V'),
(0xFF, 'V'),
(0x100, 'M', 'ā'),
(0x101, 'V'),
(0x102, 'M', 'ă'),
(0x103, 'V'),
(0x104, 'M', 'ą'),
(0x105, 'V'),
(0x106, 'M', 'ć'),
(0x107, 'V'),
(0x108, 'M', 'ĉ'),
(0x109, 'V'),
(0x10A, 'M', 'ċ'),
(0x10B, 'V'),
(0x10C, 'M', 'č'),
(0x10D, 'V'),
(0x10E, 'M', 'ď'),
(0x10F, 'V'),
(0x110, 'M', 'đ'),
(0x111, 'V'),
(0x112, 'M', 'ē'),
(0x113, 'V'),
(0x114, 'M', 'ĕ'),
(0x115, 'V'),
(0x116, 'M', 'ė'),
(0x117, 'V'),
(0x118, 'M', 'ę'),
(0x119, 'V'),
(0x11A, 'M', 'ě'),
(0x11B, 'V'),
(0x11C, 'M', 'ĝ'),
(0x11D, 'V'),
(0x11E, 'M', 'ğ'),
(0x11F, 'V'),
(0x120, 'M', 'ġ'),
(0x121, 'V'),
(0x122, 'M', 'ģ'),
(0x123, 'V'),
(0x124, 'M', 'ĥ'),
(0x125, 'V'),
(0x126, 'M', 'ħ'),
(0x127, 'V'),
(0x128, 'M', 'ĩ'),
(0x129, 'V'),
(0x12A, 'M', 'ī'),
(0x12B, 'V'),
]
def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x12C, 'M', 'ĭ'),
(0x12D, 'V'),
(0x12E, 'M', 'į'),
(0x12F, 'V'),
(0x130, 'M', 'i̇'),
(0x131, 'V'),
(0x132, 'M', 'ij'),
(0x134, 'M', 'ĵ'),
(0x135, 'V'),
(0x136, 'M', 'ķ'),
(0x137, 'V'),
(0x139, 'M', 'ĺ'),
(0x13A, 'V'),
(0x13B, 'M', 'ļ'),
(0x13C, 'V'),
(0x13D, 'M', 'ľ'),
(0x13E, 'V'),
(0x13F, 'M', 'l·'),
(0x141, 'M', 'ł'),
(0x142, 'V'),
(0x143, 'M', 'ń'),
(0x144, 'V'),
(0x145, 'M', 'ņ'),
(0x146, 'V'),
(0x147, 'M', 'ň'),
(0x148, 'V'),
(0x149, 'M', 'ʼn'),
(0x14A, 'M', 'ŋ'),
(0x14B, 'V'),
(0x14C, 'M', 'ō'),
(0x14D, 'V'),
(0x14E, 'M', 'ŏ'),
(0x14F, 'V'),
(0x150, 'M', 'ő'),
(0x151, 'V'),
(0x152, 'M', 'œ'),
(0x153, 'V'),
(0x154, 'M', 'ŕ'),
(0x155, 'V'),
(0x156, 'M', 'ŗ'),
(0x157, 'V'),
(0x158, 'M', 'ř'),
(0x159, 'V'),
(0x15A, 'M', 'ś'),
(0x15B, 'V'),
(0x15C, 'M', 'ŝ'),
(0x15D, 'V'),
(0x15E, 'M', 'ş'),
(0x15F, 'V'),
(0x160, 'M', 'š'),
(0x161, 'V'),
(0x162, 'M', 'ţ'),
(0x163, 'V'),
(0x164, 'M', 'ť'),
(0x165, 'V'),
(0x166, 'M', 'ŧ'),
(0x167, 'V'),
(0x168, 'M', 'ũ'),
(0x169, 'V'),
(0x16A, 'M', 'ū'),
(0x16B, 'V'),
(0x16C, 'M', 'ŭ'),
(0x16D, 'V'),
(0x16E, 'M', 'ů'),
(0x16F, 'V'),
(0x170, 'M', 'ű'),
(0x171, 'V'),
(0x172, 'M', 'ų'),
(0x173, 'V'),
(0x174, 'M', 'ŵ'),
(0x175, 'V'),
(0x176, 'M', 'ŷ'),
(0x177, 'V'),
(0x178, 'M', 'ÿ'),
(0x179, 'M', 'ź'),
(0x17A, 'V'),
(0x17B, 'M', 'ż'),
(0x17C, 'V'),
(0x17D, 'M', 'ž'),
(0x17E, 'V'),
(0x17F, 'M', 's'),
(0x180, 'V'),
(0x181, 'M', 'ɓ'),
(0x182, 'M', 'ƃ'),
(0x183, 'V'),
(0x184, 'M', 'ƅ'),
(0x185, 'V'),
(0x186, 'M', 'ɔ'),
(0x187, 'M', 'ƈ'),
(0x188, 'V'),
(0x189, 'M', 'ɖ'),
(0x18A, 'M', 'ɗ'),
(0x18B, 'M', 'ƌ'),
(0x18C, 'V'),
(0x18E, 'M', 'ǝ'),
(0x18F, 'M', 'ə'),
(0x190, 'M', 'ɛ'),
(0x191, 'M', 'ƒ'),
(0x192, 'V'),
(0x193, 'M', 'ɠ'),
]
def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x194, 'M', 'ɣ'),
(0x195, 'V'),
(0x196, 'M', 'ɩ'),
(0x197, 'M', 'ɨ'),
(0x198, 'M', 'ƙ'),
(0x199, 'V'),
(0x19C, 'M', 'ɯ'),
(0x19D, 'M', 'ɲ'),
(0x19E, 'V'),
(0x19F, 'M', 'ɵ'),
(0x1A0, 'M', 'ơ'),
(0x1A1, 'V'),
(0x1A2, 'M', 'ƣ'),
(0x1A3, 'V'),
(0x1A4, 'M', 'ƥ'),
(0x1A5, 'V'),
(0x1A6, 'M', 'ʀ'),
(0x1A7, 'M', 'ƨ'),
(0x1A8, 'V'),
(0x1A9, 'M', 'ʃ'),
(0x1AA, 'V'),
(0x1AC, 'M', 'ƭ'),
(0x1AD, 'V'),
(0x1AE, 'M', 'ʈ'),
(0x1AF, 'M', 'ư'),
(0x1B0, 'V'),
(0x1B1, 'M', 'ʊ'),
(0x1B2, 'M', 'ʋ'),
(0x1B3, 'M', 'ƴ'),
(0x1B4, 'V'),
(0x1B5, 'M', 'ƶ'),
(0x1B6, 'V'),
(0x1B7, 'M', 'ʒ'),
(0x1B8, 'M', 'ƹ'),
(0x1B9, 'V'),
(0x1BC, 'M', 'ƽ'),
(0x1BD, 'V'),
(0x1C4, 'M', 'dž'),
(0x1C7, 'M', 'lj'),
(0x1CA, 'M', 'nj'),
(0x1CD, 'M', 'ǎ'),
(0x1CE, 'V'),
(0x1CF, 'M', 'ǐ'),
(0x1D0, 'V'),
(0x1D1, 'M', 'ǒ'),
(0x1D2, 'V'),
(0x1D3, 'M', 'ǔ'),
(0x1D4, 'V'),
(0x1D5, 'M', 'ǖ'),
(0x1D6, 'V'),
(0x1D7, 'M', 'ǘ'),
(0x1D8, 'V'),
(0x1D9, 'M', 'ǚ'),
(0x1DA, 'V'),
(0x1DB, 'M', 'ǜ'),
(0x1DC, 'V'),
(0x1DE, 'M', 'ǟ'),
(0x1DF, 'V'),
(0x1E0, 'M', 'ǡ'),
(0x1E1, 'V'),
(0x1E2, 'M', 'ǣ'),
(0x1E3, 'V'),
(0x1E4, 'M', 'ǥ'),
(0x1E5, 'V'),
(0x1E6, 'M', 'ǧ'),
(0x1E7, 'V'),
(0x1E8, 'M', 'ǩ'),
(0x1E9, 'V'),
(0x1EA, 'M', 'ǫ'),
(0x1EB, 'V'),
(0x1EC, 'M', 'ǭ'),
(0x1ED, 'V'),
(0x1EE, 'M', 'ǯ'),
(0x1EF, 'V'),
(0x1F1, 'M', 'dz'),
(0x1F4, 'M', 'ǵ'),
(0x1F5, 'V'),
(0x1F6, 'M', 'ƕ'),
(0x1F7, 'M', 'ƿ'),
(0x1F8, 'M', 'ǹ'),
(0x1F9, 'V'),
(0x1FA, 'M', 'ǻ'),
(0x1FB, 'V'),
(0x1FC, 'M', 'ǽ'),
(0x1FD, 'V'),
(0x1FE, 'M', 'ǿ'),
(0x1FF, 'V'),
(0x200, 'M', 'ȁ'),
(0x201, 'V'),
(0x202, 'M', 'ȃ'),
(0x203, 'V'),
(0x204, 'M', 'ȅ'),
(0x205, 'V'),
(0x206, 'M', 'ȇ'),
(0x207, 'V'),
(0x208, 'M', 'ȉ'),
(0x209, 'V'),
(0x20A, 'M', 'ȋ'),
(0x20B, 'V'),
(0x20C, 'M', 'ȍ'),
]
def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x20D, 'V'),
(0x20E, 'M', 'ȏ'),
(0x20F, 'V'),
(0x210, 'M', 'ȑ'),
(0x211, 'V'),
(0x212, 'M', 'ȓ'),
(0x213, 'V'),
(0x214, 'M', 'ȕ'),
(0x215, 'V'),
(0x216, 'M', 'ȗ'),
(0x217, 'V'),
(0x218, 'M', 'ș'),
(0x219, 'V'),
(0x21A, 'M', 'ț'),
(0x21B, 'V'),
(0x21C, 'M', 'ȝ'),
(0x21D, 'V'),
(0x21E, 'M', 'ȟ'),
(0x21F, 'V'),
(0x220, 'M', 'ƞ'),
(0x221, 'V'),
(0x222, 'M', 'ȣ'),
(0x223, 'V'),
(0x224, 'M', 'ȥ'),
(0x225, 'V'),
(0x226, 'M', 'ȧ'),
(0x227, 'V'),
(0x228, 'M', 'ȩ'),
(0x229, 'V'),
(0x22A, 'M', 'ȫ'),
(0x22B, 'V'),
(0x22C, 'M', 'ȭ'),
(0x22D, 'V'),
(0x22E, 'M', 'ȯ'),
(0x22F, 'V'),
(0x230, 'M', 'ȱ'),
(0x231, 'V'),
(0x232, 'M', 'ȳ'),
(0x233, 'V'),
(0x23A, 'M', 'ⱥ'),
(0x23B, 'M', 'ȼ'),
(0x23C, 'V'),
(0x23D, 'M', 'ƚ'),
(0x23E, 'M', 'ⱦ'),
(0x23F, 'V'),
(0x241, 'M', 'ɂ'),
(0x242, 'V'),
(0x243, 'M', 'ƀ'),
(0x244, 'M', 'ʉ'),
(0x245, 'M', 'ʌ'),
(0x246, 'M', 'ɇ'),
(0x247, 'V'),
(0x248, 'M', 'ɉ'),
(0x249, 'V'),
(0x24A, 'M', 'ɋ'),
(0x24B, 'V'),
(0x24C, 'M', 'ɍ'),
(0x24D, 'V'),
(0x24E, 'M', 'ɏ'),
(0x24F, 'V'),
(0x2B0, 'M', 'h'),
(0x2B1, 'M', 'ɦ'),
(0x2B2, 'M', 'j'),
(0x2B3, 'M', 'r'),
(0x2B4, 'M', 'ɹ'),
(0x2B5, 'M', 'ɻ'),
(0x2B6, 'M', 'ʁ'),
(0x2B7, 'M', 'w'),
(0x2B8, 'M', 'y'),
(0x2B9, 'V'),
(0x2D8, '3', ' ̆'),
(0x2D9, '3', ' ̇'),
(0x2DA, '3', ' ̊'),
(0x2DB, '3', ' ̨'),
(0x2DC, '3', ' ̃'),
(0x2DD, '3', ' ̋'),
(0x2DE, 'V'),
(0x2E0, 'M', 'ɣ'),
(0x2E1, 'M', 'l'),
(0x2E2, 'M', 's'),
(0x2E3, 'M', 'x'),
(0x2E4, 'M', 'ʕ'),
(0x2E5, 'V'),
(0x340, 'M', '̀'),
(0x341, 'M', '́'),
(0x342, 'V'),
(0x343, 'M', '̓'),
(0x344, 'M', '̈́'),
(0x345, 'M', 'ι'),
(0x346, 'V'),
(0x34F, 'I'),
(0x350, 'V'),
(0x370, 'M', 'ͱ'),
(0x371, 'V'),
(0x372, 'M', 'ͳ'),
(0x373, 'V'),
(0x374, 'M', 'ʹ'),
(0x375, 'V'),
(0x376, 'M', 'ͷ'),
(0x377, 'V'),
]
def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x378, 'X'),
(0x37A, '3', ' ι'),
(0x37B, 'V'),
(0x37E, '3', ';'),
(0x37F, 'M', 'ϳ'),
(0x380, 'X'),
(0x384, '3', ' ́'),
(0x385, '3', ' ̈́'),
(0x386, 'M', 'ά'),
(0x387, 'M', '·'),
(0x388, 'M', 'έ'),
(0x389, 'M', 'ή'),
(0x38A, 'M', 'ί'),
(0x38B, 'X'),
(0x38C, 'M', 'ό'),
(0x38D, 'X'),
(0x38E, 'M', 'ύ'),
(0x38F, 'M', 'ώ'),
(0x390, 'V'),
(0x391, 'M', 'α'),
(0x392, 'M', 'β'),
(0x393, 'M', 'γ'),
(0x394, 'M', 'δ'),
(0x395, 'M', 'ε'),
(0x396, 'M', 'ζ'),
(0x397, 'M', 'η'),
(0x398, 'M', 'θ'),
(0x399, 'M', 'ι'),
(0x39A, 'M', 'κ'),
(0x39B, 'M', 'λ'),
(0x39C, 'M', 'μ'),
(0x39D, 'M', 'ν'),
(0x39E, 'M', 'ξ'),
(0x39F, 'M', 'ο'),
(0x3A0, 'M', 'π'),
(0x3A1, 'M', 'ρ'),
(0x3A2, 'X'),
(0x3A3, 'M', 'σ'),
(0x3A4, 'M', 'τ'),
(0x3A5, 'M', 'υ'),
(0x3A6, 'M', 'φ'),
(0x3A7, 'M', 'χ'),
(0x3A8, 'M', 'ψ'),
(0x3A9, 'M', 'ω'),
(0x3AA, 'M', 'ϊ'),
(0x3AB, 'M', 'ϋ'),
(0x3AC, 'V'),
(0x3C2, 'D', 'σ'),
(0x3C3, 'V'),
(0x3CF, 'M', 'ϗ'),
(0x3D0, 'M', 'β'),
(0x3D1, 'M', 'θ'),
(0x3D2, 'M', 'υ'),
(0x3D3, 'M', 'ύ'),
(0x3D4, 'M', 'ϋ'),
(0x3D5, 'M', 'φ'),
(0x3D6, 'M', 'π'),
(0x3D7, 'V'),
(0x3D8, 'M', 'ϙ'),
(0x3D9, 'V'),
(0x3DA, 'M', 'ϛ'),
(0x3DB, 'V'),
(0x3DC, 'M', 'ϝ'),
(0x3DD, 'V'),
(0x3DE, 'M', 'ϟ'),
(0x3DF, 'V'),
(0x3E0, 'M', 'ϡ'),
(0x3E1, 'V'),
(0x3E2, 'M', 'ϣ'),
(0x3E3, 'V'),
(0x3E4, 'M', 'ϥ'),
(0x3E5, 'V'),
(0x3E6, 'M', 'ϧ'),
(0x3E7, 'V'),
(0x3E8, 'M', 'ϩ'),
(0x3E9, 'V'),
(0x3EA, 'M', 'ϫ'),
(0x3EB, 'V'),
(0x3EC, 'M', 'ϭ'),
(0x3ED, 'V'),
(0x3EE, 'M', 'ϯ'),
(0x3EF, 'V'),
(0x3F0, 'M', 'κ'),
(0x3F1, 'M', 'ρ'),
(0x3F2, 'M', 'σ'),
(0x3F3, 'V'),
(0x3F4, 'M', 'θ'),
(0x3F5, 'M', 'ε'),
(0x3F6, 'V'),
(0x3F7, 'M', 'ϸ'),
(0x3F8, 'V'),
(0x3F9, 'M', 'σ'),
(0x3FA, 'M', 'ϻ'),
(0x3FB, 'V'),
(0x3FD, 'M', 'ͻ'),
(0x3FE, 'M', 'ͼ'),
(0x3FF, 'M', 'ͽ'),
(0x400, 'M', 'ѐ'),
(0x401, 'M', 'ё'),
(0x402, 'M', 'ђ'),
]
def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x403, 'M', 'ѓ'),
(0x404, 'M', 'є'),
(0x405, 'M', 'ѕ'),
(0x406, 'M', 'і'),
(0x407, 'M', 'ї'),
(0x408, 'M', 'ј'),
(0x409, 'M', 'љ'),
(0x40A, 'M', 'њ'),
(0x40B, 'M', 'ћ'),
(0x40C, 'M', 'ќ'),
(0x40D, 'M', 'ѝ'),
(0x40E, 'M', 'ў'),
(0x40F, 'M', 'џ'),
(0x410, 'M', 'а'),
(0x411, 'M', 'б'),
(0x412, 'M', 'в'),
(0x413, 'M', 'г'),
(0x414, 'M', 'д'),
(0x415, 'M', 'е'),
(0x416, 'M', 'ж'),
(0x417, 'M', 'з'),
(0x418, 'M', 'и'),
(0x419, 'M', 'й'),
(0x41A, 'M', 'к'),
(0x41B, 'M', 'л'),
(0x41C, 'M', 'м'),
(0x41D, 'M', 'н'),
(0x41E, 'M', 'о'),
(0x41F, 'M', 'п'),
(0x420, 'M', 'р'),
(0x421, 'M', 'с'),
(0x422, 'M', 'т'),
(0x423, 'M', 'у'),
(0x424, 'M', 'ф'),
(0x425, 'M', 'х'),
(0x426, 'M', 'ц'),
(0x427, 'M', 'ч'),
(0x428, 'M', 'ш'),
(0x429, 'M', 'щ'),
(0x42A, 'M', 'ъ'),
(0x42B, 'M', 'ы'),
(0x42C, 'M', 'ь'),
(0x42D, 'M', 'э'),
(0x42E, 'M', 'ю'),
(0x42F, 'M', 'я'),
(0x430, 'V'),
(0x460, 'M', 'ѡ'),
(0x461, 'V'),
(0x462, 'M', 'ѣ'),
(0x463, 'V'),
(0x464, 'M', 'ѥ'),
(0x465, 'V'),
(0x466, 'M', 'ѧ'),
(0x467, 'V'),
(0x468, 'M', 'ѩ'),
(0x469, 'V'),
(0x46A, 'M', 'ѫ'),
(0x46B, 'V'),
(0x46C, 'M', 'ѭ'),
(0x46D, 'V'),
(0x46E, 'M', 'ѯ'),
(0x46F, 'V'),
(0x470, 'M', 'ѱ'),
(0x471, 'V'),
(0x472, 'M', 'ѳ'),
(0x473, 'V'),
(0x474, 'M', 'ѵ'),
(0x475, 'V'),
(0x476, 'M', 'ѷ'),
(0x477, 'V'),
(0x478, 'M', 'ѹ'),
(0x479, 'V'),
(0x47A, 'M', 'ѻ'),
(0x47B, 'V'),
(0x47C, 'M', 'ѽ'),
(0x47D, 'V'),
(0x47E, 'M', 'ѿ'),
(0x47F, 'V'),
(0x480, 'M', 'ҁ'),
(0x481, 'V'),
(0x48A, 'M', 'ҋ'),
(0x48B, 'V'),
(0x48C, 'M', 'ҍ'),
(0x48D, 'V'),
(0x48E, 'M', 'ҏ'),
(0x48F, 'V'),
(0x490, 'M', 'ґ'),
(0x491, 'V'),
(0x492, 'M', 'ғ'),
(0x493, 'V'),
(0x494, 'M', 'ҕ'),
(0x495, 'V'),
(0x496, 'M', 'җ'),
(0x497, 'V'),
(0x498, 'M', 'ҙ'),
(0x499, 'V'),
(0x49A, 'M', 'қ'),
(0x49B, 'V'),
(0x49C, 'M', 'ҝ'),
(0x49D, 'V'),
]
def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x49E, 'M', 'ҟ'),
(0x49F, 'V'),
(0x4A0, 'M', 'ҡ'),
(0x4A1, 'V'),
(0x4A2, 'M', 'ң'),
(0x4A3, 'V'),
(0x4A4, 'M', 'ҥ'),
(0x4A5, 'V'),
(0x4A6, 'M', 'ҧ'),
(0x4A7, 'V'),
(0x4A8, 'M', 'ҩ'),
(0x4A9, 'V'),
(0x4AA, 'M', 'ҫ'),
(0x4AB, 'V'),
(0x4AC, 'M', 'ҭ'),
(0x4AD, 'V'),
(0x4AE, 'M', 'ү'),
(0x4AF, 'V'),
(0x4B0, 'M', 'ұ'),
(0x4B1, 'V'),
(0x4B2, 'M', 'ҳ'),
(0x4B3, 'V'),
(0x4B4, 'M', 'ҵ'),
(0x4B5, 'V'),
(0x4B6, 'M', 'ҷ'),
(0x4B7, 'V'),
(0x4B8, 'M', 'ҹ'),
(0x4B9, 'V'),
(0x4BA, 'M', 'һ'),
(0x4BB, 'V'),
(0x4BC, 'M', 'ҽ'),
(0x4BD, 'V'),
(0x4BE, 'M', 'ҿ'),
(0x4BF, 'V'),
(0x4C0, 'X'),
(0x4C1, 'M', 'ӂ'),
(0x4C2, 'V'),
(0x4C3, 'M', 'ӄ'),
(0x4C4, 'V'),
(0x4C5, 'M', 'ӆ'),
(0x4C6, 'V'),
(0x4C7, 'M', 'ӈ'),
(0x4C8, 'V'),
(0x4C9, 'M', 'ӊ'),
(0x4CA, 'V'),
(0x4CB, 'M', 'ӌ'),
(0x4CC, 'V'),
(0x4CD, 'M', 'ӎ'),
(0x4CE, 'V'),
(0x4D0, 'M', 'ӑ'),
(0x4D1, 'V'),
(0x4D2, 'M', 'ӓ'),
(0x4D3, 'V'),
(0x4D4, 'M', 'ӕ'),
(0x4D5, 'V'),
(0x4D6, 'M', 'ӗ'),
(0x4D7, 'V'),
(0x4D8, 'M', 'ә'),
(0x4D9, 'V'),
(0x4DA, 'M', 'ӛ'),
(0x4DB, 'V'),
(0x4DC, 'M', 'ӝ'),
(0x4DD, 'V'),
(0x4DE, 'M', 'ӟ'),
(0x4DF, 'V'),
(0x4E0, 'M', 'ӡ'),
(0x4E1, 'V'),
(0x4E2, 'M', 'ӣ'),
(0x4E3, 'V'),
(0x4E4, 'M', 'ӥ'),
(0x4E5, 'V'),
(0x4E6, 'M', 'ӧ'),
(0x4E7, 'V'),
(0x4E8, 'M', 'ө'),
(0x4E9, 'V'),
(0x4EA, 'M', 'ӫ'),
(0x4EB, 'V'),
(0x4EC, 'M', 'ӭ'),
(0x4ED, 'V'),
(0x4EE, 'M', 'ӯ'),
(0x4EF, 'V'),
(0x4F0, 'M', 'ӱ'),
(0x4F1, 'V'),
(0x4F2, 'M', 'ӳ'),
(0x4F3, 'V'),
(0x4F4, 'M', 'ӵ'),
(0x4F5, 'V'),
(0x4F6, 'M', 'ӷ'),
(0x4F7, 'V'),
(0x4F8, 'M', 'ӹ'),
(0x4F9, 'V'),
(0x4FA, 'M', 'ӻ'),
(0x4FB, 'V'),
(0x4FC, 'M', 'ӽ'),
(0x4FD, 'V'),
(0x4FE, 'M', 'ӿ'),
(0x4FF, 'V'),
(0x500, 'M', 'ԁ'),
(0x501, 'V'),
(0x502, 'M', 'ԃ'),
]
def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x503, 'V'),
(0x504, 'M', 'ԅ'),
(0x505, 'V'),
(0x506, 'M', 'ԇ'),
(0x507, 'V'),
(0x508, 'M', 'ԉ'),
(0x509, 'V'),
(0x50A, 'M', 'ԋ'),
(0x50B, 'V'),
(0x50C, 'M', 'ԍ'),
(0x50D, 'V'),
(0x50E, 'M', 'ԏ'),
(0x50F, 'V'),
(0x510, 'M', 'ԑ'),
(0x511, 'V'),
(0x512, 'M', 'ԓ'),
(0x513, 'V'),
(0x514, 'M', 'ԕ'),
(0x515, 'V'),
(0x516, 'M', 'ԗ'),
(0x517, 'V'),
(0x518, 'M', 'ԙ'),
(0x519, 'V'),
(0x51A, 'M', 'ԛ'),
(0x51B, 'V'),
(0x51C, 'M', 'ԝ'),
(0x51D, 'V'),
(0x51E, 'M', 'ԟ'),
(0x51F, 'V'),
(0x520, 'M', 'ԡ'),
(0x521, 'V'),
(0x522, 'M', 'ԣ'),
(0x523, 'V'),
(0x524, 'M', 'ԥ'),
(0x525, 'V'),
(0x526, 'M', 'ԧ'),
(0x527, 'V'),
(0x528, 'M', 'ԩ'),
(0x529, 'V'),
(0x52A, 'M', 'ԫ'),
(0x52B, 'V'),
(0x52C, 'M', 'ԭ'),
(0x52D, 'V'),
(0x52E, 'M', 'ԯ'),
(0x52F, 'V'),
(0x530, 'X'),
(0x531, 'M', 'ա'),
(0x532, 'M', 'բ'),
(0x533, 'M', 'գ'),
(0x534, 'M', 'դ'),
(0x535, 'M', 'ե'),
(0x536, 'M', 'զ'),
(0x537, 'M', 'է'),
(0x538, 'M', 'ը'),
(0x539, 'M', 'թ'),
(0x53A, 'M', 'ժ'),
(0x53B, 'M', 'ի'),
(0x53C, 'M', 'լ'),
(0x53D, 'M', 'խ'),
(0x53E, 'M', 'ծ'),
(0x53F, 'M', 'կ'),
(0x540, 'M', 'հ'),
(0x541, 'M', 'ձ'),
(0x542, 'M', 'ղ'),
(0x543, 'M', 'ճ'),
(0x544, 'M', 'մ'),
(0x545, 'M', 'յ'),
(0x546, 'M', 'ն'),
(0x547, 'M', 'շ'),
(0x548, 'M', 'ո'),
(0x549, 'M', 'չ'),
(0x54A, 'M', 'պ'),
(0x54B, 'M', 'ջ'),
(0x54C, 'M', 'ռ'),
(0x54D, 'M', 'ս'),
(0x54E, 'M', 'վ'),
(0x54F, 'M', 'տ'),
(0x550, 'M', 'ր'),
(0x551, 'M', 'ց'),
(0x552, 'M', 'ւ'),
(0x553, 'M', 'փ'),
(0x554, 'M', 'ք'),
(0x555, 'M', 'օ'),
(0x556, 'M', 'ֆ'),
(0x557, 'X'),
(0x559, 'V'),
(0x587, 'M', 'եւ'),
(0x588, 'V'),
(0x58B, 'X'),
(0x58D, 'V'),
(0x590, 'X'),
(0x591, 'V'),
(0x5C8, 'X'),
(0x5D0, 'V'),
(0x5EB, 'X'),
(0x5EF, 'V'),
(0x5F5, 'X'),
(0x606, 'V'),
(0x61C, 'X'),
(0x61D, 'V'),
]
def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x675, 'M', 'اٴ'),
(0x676, 'M', 'وٴ'),
(0x677, 'M', 'ۇٴ'),
(0x678, 'M', 'يٴ'),
(0x679, 'V'),
(0x6DD, 'X'),
(0x6DE, 'V'),
(0x70E, 'X'),
(0x710, 'V'),
(0x74B, 'X'),
(0x74D, 'V'),
(0x7B2, 'X'),
(0x7C0, 'V'),
(0x7FB, 'X'),
(0x7FD, 'V'),
(0x82E, 'X'),
(0x830, 'V'),
(0x83F, 'X'),
(0x840, 'V'),
(0x85C, 'X'),
(0x85E, 'V'),
(0x85F, 'X'),
(0x860, 'V'),
(0x86B, 'X'),
(0x870, 'V'),
(0x88F, 'X'),
(0x898, 'V'),
(0x8E2, 'X'),
(0x8E3, 'V'),
(0x958, 'M', 'क़'),
(0x959, 'M', 'ख़'),
(0x95A, 'M', 'ग़'),
(0x95B, 'M', 'ज़'),
(0x95C, 'M', 'ड़'),
(0x95D, 'M', 'ढ़'),
(0x95E, 'M', 'फ़'),
(0x95F, 'M', 'य़'),
(0x960, 'V'),
(0x984, 'X'),
(0x985, 'V'),
(0x98D, 'X'),
(0x98F, 'V'),
(0x991, 'X'),
(0x993, 'V'),
(0x9A9, 'X'),
(0x9AA, 'V'),
(0x9B1, 'X'),
(0x9B2, 'V'),
(0x9B3, 'X'),
(0x9B6, 'V'),
(0x9BA, 'X'),
(0x9BC, 'V'),
(0x9C5, 'X'),
(0x9C7, 'V'),
(0x9C9, 'X'),
(0x9CB, 'V'),
(0x9CF, 'X'),
(0x9D7, 'V'),
(0x9D8, 'X'),
(0x9DC, 'M', 'ড়'),
(0x9DD, 'M', 'ঢ়'),
(0x9DE, 'X'),
(0x9DF, 'M', 'য়'),
(0x9E0, 'V'),
(0x9E4, 'X'),
(0x9E6, 'V'),
(0x9FF, 'X'),
(0xA01, 'V'),
(0xA04, 'X'),
(0xA05, 'V'),
(0xA0B, 'X'),
(0xA0F, 'V'),
(0xA11, 'X'),
(0xA13, 'V'),
(0xA29, 'X'),
(0xA2A, 'V'),
(0xA31, 'X'),
(0xA32, 'V'),
(0xA33, 'M', 'ਲ਼'),
(0xA34, 'X'),
(0xA35, 'V'),
(0xA36, 'M', 'ਸ਼'),
(0xA37, 'X'),
(0xA38, 'V'),
(0xA3A, 'X'),
(0xA3C, 'V'),
(0xA3D, 'X'),
(0xA3E, 'V'),
(0xA43, 'X'),
(0xA47, 'V'),
(0xA49, 'X'),
(0xA4B, 'V'),
(0xA4E, 'X'),
(0xA51, 'V'),
(0xA52, 'X'),
(0xA59, 'M', 'ਖ਼'),
(0xA5A, 'M', 'ਗ਼'),
(0xA5B, 'M', 'ਜ਼'),
(0xA5C, 'V'),
(0xA5D, 'X'),
]
def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xA5E, 'M', 'ਫ਼'),
(0xA5F, 'X'),
(0xA66, 'V'),
(0xA77, 'X'),
(0xA81, 'V'),
(0xA84, 'X'),
(0xA85, 'V'),
(0xA8E, 'X'),
(0xA8F, 'V'),
(0xA92, 'X'),
(0xA93, 'V'),
(0xAA9, 'X'),
(0xAAA, 'V'),
(0xAB1, 'X'),
(0xAB2, 'V'),
(0xAB4, 'X'),
(0xAB5, 'V'),
(0xABA, 'X'),
(0xABC, 'V'),
(0xAC6, 'X'),
(0xAC7, 'V'),
(0xACA, 'X'),
(0xACB, 'V'),
(0xACE, 'X'),
(0xAD0, 'V'),
(0xAD1, 'X'),
(0xAE0, 'V'),
(0xAE4, 'X'),
(0xAE6, 'V'),
(0xAF2, 'X'),
(0xAF9, 'V'),
(0xB00, 'X'),
(0xB01, 'V'),
(0xB04, 'X'),
(0xB05, 'V'),
(0xB0D, 'X'),
(0xB0F, 'V'),
(0xB11, 'X'),
(0xB13, 'V'),
(0xB29, 'X'),
(0xB2A, 'V'),
(0xB31, 'X'),
(0xB32, 'V'),
(0xB34, 'X'),
(0xB35, 'V'),
(0xB3A, 'X'),
(0xB3C, 'V'),
(0xB45, 'X'),
(0xB47, 'V'),
(0xB49, 'X'),
(0xB4B, 'V'),
(0xB4E, 'X'),
(0xB55, 'V'),
(0xB58, 'X'),
(0xB5C, 'M', 'ଡ଼'),
(0xB5D, 'M', 'ଢ଼'),
(0xB5E, 'X'),
(0xB5F, 'V'),
(0xB64, 'X'),
(0xB66, 'V'),
(0xB78, 'X'),
(0xB82, 'V'),
(0xB84, 'X'),
(0xB85, 'V'),
(0xB8B, 'X'),
(0xB8E, 'V'),
(0xB91, 'X'),
(0xB92, 'V'),
(0xB96, 'X'),
(0xB99, 'V'),
(0xB9B, 'X'),
(0xB9C, 'V'),
(0xB9D, 'X'),
(0xB9E, 'V'),
(0xBA0, 'X'),
(0xBA3, 'V'),
(0xBA5, 'X'),
(0xBA8, 'V'),
(0xBAB, 'X'),
(0xBAE, 'V'),
(0xBBA, 'X'),
(0xBBE, 'V'),
(0xBC3, 'X'),
(0xBC6, 'V'),
(0xBC9, 'X'),
(0xBCA, 'V'),
(0xBCE, 'X'),
(0xBD0, 'V'),
(0xBD1, 'X'),
(0xBD7, 'V'),
(0xBD8, 'X'),
(0xBE6, 'V'),
(0xBFB, 'X'),
(0xC00, 'V'),
(0xC0D, 'X'),
(0xC0E, 'V'),
(0xC11, 'X'),
(0xC12, 'V'),
(0xC29, 'X'),
(0xC2A, 'V'),
]
def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xC3A, 'X'),
(0xC3C, 'V'),
(0xC45, 'X'),
(0xC46, 'V'),
(0xC49, 'X'),
(0xC4A, 'V'),
(0xC4E, 'X'),
(0xC55, 'V'),
(0xC57, 'X'),
(0xC58, 'V'),
(0xC5B, 'X'),
(0xC5D, 'V'),
(0xC5E, 'X'),
(0xC60, 'V'),
(0xC64, 'X'),
(0xC66, 'V'),
(0xC70, 'X'),
(0xC77, 'V'),
(0xC8D, 'X'),
(0xC8E, 'V'),
(0xC91, 'X'),
(0xC92, 'V'),
(0xCA9, 'X'),
(0xCAA, 'V'),
(0xCB4, 'X'),
(0xCB5, 'V'),
(0xCBA, 'X'),
(0xCBC, 'V'),
(0xCC5, 'X'),
(0xCC6, 'V'),
(0xCC9, 'X'),
(0xCCA, 'V'),
(0xCCE, 'X'),
(0xCD5, 'V'),
(0xCD7, 'X'),
(0xCDD, 'V'),
(0xCDF, 'X'),
(0xCE0, 'V'),
(0xCE4, 'X'),
(0xCE6, 'V'),
(0xCF0, 'X'),
(0xCF1, 'V'),
(0xCF4, 'X'),
(0xD00, 'V'),
(0xD0D, 'X'),
(0xD0E, 'V'),
(0xD11, 'X'),
(0xD12, 'V'),
(0xD45, 'X'),
(0xD46, 'V'),
(0xD49, 'X'),
(0xD4A, 'V'),
(0xD50, 'X'),
(0xD54, 'V'),
(0xD64, 'X'),
(0xD66, 'V'),
(0xD80, 'X'),
(0xD81, 'V'),
(0xD84, 'X'),
(0xD85, 'V'),
(0xD97, 'X'),
(0xD9A, 'V'),
(0xDB2, 'X'),
(0xDB3, 'V'),
(0xDBC, 'X'),
(0xDBD, 'V'),
(0xDBE, 'X'),
(0xDC0, 'V'),
(0xDC7, 'X'),
(0xDCA, 'V'),
(0xDCB, 'X'),
(0xDCF, 'V'),
(0xDD5, 'X'),
(0xDD6, 'V'),
(0xDD7, 'X'),
(0xDD8, 'V'),
(0xDE0, 'X'),
(0xDE6, 'V'),
(0xDF0, 'X'),
(0xDF2, 'V'),
(0xDF5, 'X'),
(0xE01, 'V'),
(0xE33, 'M', 'ํา'),
(0xE34, 'V'),
(0xE3B, 'X'),
(0xE3F, 'V'),
(0xE5C, 'X'),
(0xE81, 'V'),
(0xE83, 'X'),
(0xE84, 'V'),
(0xE85, 'X'),
(0xE86, 'V'),
(0xE8B, 'X'),
(0xE8C, 'V'),
(0xEA4, 'X'),
(0xEA5, 'V'),
(0xEA6, 'X'),
(0xEA7, 'V'),
(0xEB3, 'M', 'ໍາ'),
(0xEB4, 'V'),
]
def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xEBE, 'X'),
(0xEC0, 'V'),
(0xEC5, 'X'),
(0xEC6, 'V'),
(0xEC7, 'X'),
(0xEC8, 'V'),
(0xECF, 'X'),
(0xED0, 'V'),
(0xEDA, 'X'),
(0xEDC, 'M', 'ຫນ'),
(0xEDD, 'M', 'ຫມ'),
(0xEDE, 'V'),
(0xEE0, 'X'),
(0xF00, 'V'),
(0xF0C, 'M', '་'),
(0xF0D, 'V'),
(0xF43, 'M', 'གྷ'),
(0xF44, 'V'),
(0xF48, 'X'),
(0xF49, 'V'),
(0xF4D, 'M', 'ཌྷ'),
(0xF4E, 'V'),
(0xF52, 'M', 'དྷ'),
(0xF53, 'V'),
(0xF57, 'M', 'བྷ'),
(0xF58, 'V'),
(0xF5C, 'M', 'ཛྷ'),
(0xF5D, 'V'),
(0xF69, 'M', 'ཀྵ'),
(0xF6A, 'V'),
(0xF6D, 'X'),
(0xF71, 'V'),
(0xF73, 'M', 'ཱི'),
(0xF74, 'V'),
(0xF75, 'M', 'ཱུ'),
(0xF76, 'M', 'ྲྀ'),
(0xF77, 'M', 'ྲཱྀ'),
(0xF78, 'M', 'ླྀ'),
(0xF79, 'M', 'ླཱྀ'),
(0xF7A, 'V'),
(0xF81, 'M', 'ཱྀ'),
(0xF82, 'V'),
(0xF93, 'M', 'ྒྷ'),
(0xF94, 'V'),
(0xF98, 'X'),
(0xF99, 'V'),
(0xF9D, 'M', 'ྜྷ'),
(0xF9E, 'V'),
(0xFA2, 'M', 'ྡྷ'),
(0xFA3, 'V'),
(0xFA7, 'M', 'ྦྷ'),
(0xFA8, 'V'),
(0xFAC, 'M', 'ྫྷ'),
(0xFAD, 'V'),
(0xFB9, 'M', 'ྐྵ'),
(0xFBA, 'V'),
(0xFBD, 'X'),
(0xFBE, 'V'),
(0xFCD, 'X'),
(0xFCE, 'V'),
(0xFDB, 'X'),
(0x1000, 'V'),
(0x10A0, 'X'),
(0x10C7, 'M', 'ⴧ'),
(0x10C8, 'X'),
(0x10CD, 'M', 'ⴭ'),
(0x10CE, 'X'),
(0x10D0, 'V'),
(0x10FC, 'M', 'ნ'),
(0x10FD, 'V'),
(0x115F, 'X'),
(0x1161, 'V'),
(0x1249, 'X'),
(0x124A, 'V'),
(0x124E, 'X'),
(0x1250, 'V'),
(0x1257, 'X'),
(0x1258, 'V'),
(0x1259, 'X'),
(0x125A, 'V'),
(0x125E, 'X'),
(0x1260, 'V'),
(0x1289, 'X'),
(0x128A, 'V'),
(0x128E, 'X'),
(0x1290, 'V'),
(0x12B1, 'X'),
(0x12B2, 'V'),
(0x12B6, 'X'),
(0x12B8, 'V'),
(0x12BF, 'X'),
(0x12C0, 'V'),
(0x12C1, 'X'),
(0x12C2, 'V'),
(0x12C6, 'X'),
(0x12C8, 'V'),
(0x12D7, 'X'),
(0x12D8, 'V'),
(0x1311, 'X'),
(0x1312, 'V'),
]
def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1316, 'X'),
(0x1318, 'V'),
(0x135B, 'X'),
(0x135D, 'V'),
(0x137D, 'X'),
(0x1380, 'V'),
(0x139A, 'X'),
(0x13A0, 'V'),
(0x13F6, 'X'),
(0x13F8, 'M', 'Ᏸ'),
(0x13F9, 'M', 'Ᏹ'),
(0x13FA, 'M', 'Ᏺ'),
(0x13FB, 'M', 'Ᏻ'),
(0x13FC, 'M', 'Ᏼ'),
(0x13FD, 'M', 'Ᏽ'),
(0x13FE, 'X'),
(0x1400, 'V'),
(0x1680, 'X'),
(0x1681, 'V'),
(0x169D, 'X'),
(0x16A0, 'V'),
(0x16F9, 'X'),
(0x1700, 'V'),
(0x1716, 'X'),
(0x171F, 'V'),
(0x1737, 'X'),
(0x1740, 'V'),
(0x1754, 'X'),
(0x1760, 'V'),
(0x176D, 'X'),
(0x176E, 'V'),
(0x1771, 'X'),
(0x1772, 'V'),
(0x1774, 'X'),
(0x1780, 'V'),
(0x17B4, 'X'),
(0x17B6, 'V'),
(0x17DE, 'X'),
(0x17E0, 'V'),
(0x17EA, 'X'),
(0x17F0, 'V'),
(0x17FA, 'X'),
(0x1800, 'V'),
(0x1806, 'X'),
(0x1807, 'V'),
(0x180B, 'I'),
(0x180E, 'X'),
(0x180F, 'I'),
(0x1810, 'V'),
(0x181A, 'X'),
(0x1820, 'V'),
(0x1879, 'X'),
(0x1880, 'V'),
(0x18AB, 'X'),
(0x18B0, 'V'),
(0x18F6, 'X'),
(0x1900, 'V'),
(0x191F, 'X'),
(0x1920, 'V'),
(0x192C, 'X'),
(0x1930, 'V'),
(0x193C, 'X'),
(0x1940, 'V'),
(0x1941, 'X'),
(0x1944, 'V'),
(0x196E, 'X'),
(0x1970, 'V'),
(0x1975, 'X'),
(0x1980, 'V'),
(0x19AC, 'X'),
(0x19B0, 'V'),
(0x19CA, 'X'),
(0x19D0, 'V'),
(0x19DB, 'X'),
(0x19DE, 'V'),
(0x1A1C, 'X'),
(0x1A1E, 'V'),
(0x1A5F, 'X'),
(0x1A60, 'V'),
(0x1A7D, 'X'),
(0x1A7F, 'V'),
(0x1A8A, 'X'),
(0x1A90, 'V'),
(0x1A9A, 'X'),
(0x1AA0, 'V'),
(0x1AAE, 'X'),
(0x1AB0, 'V'),
(0x1ACF, 'X'),
(0x1B00, 'V'),
(0x1B4D, 'X'),
(0x1B50, 'V'),
(0x1B7F, 'X'),
(0x1B80, 'V'),
(0x1BF4, 'X'),
(0x1BFC, 'V'),
(0x1C38, 'X'),
(0x1C3B, 'V'),
(0x1C4A, 'X'),
(0x1C4D, 'V'),
(0x1C80, 'M', 'в'),
]
def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1C81, 'M', 'д'),
(0x1C82, 'M', 'о'),
(0x1C83, 'M', 'с'),
(0x1C84, 'M', 'т'),
(0x1C86, 'M', 'ъ'),
(0x1C87, 'M', 'ѣ'),
(0x1C88, 'M', 'ꙋ'),
(0x1C89, 'X'),
(0x1C90, 'M', 'ა'),
(0x1C91, 'M', 'ბ'),
(0x1C92, 'M', 'გ'),
(0x1C93, 'M', 'დ'),
(0x1C94, 'M', 'ე'),
(0x1C95, 'M', 'ვ'),
(0x1C96, 'M', 'ზ'),
(0x1C97, 'M', 'თ'),
(0x1C98, 'M', 'ი'),
(0x1C99, 'M', 'კ'),
(0x1C9A, 'M', 'ლ'),
(0x1C9B, 'M', 'მ'),
(0x1C9C, 'M', 'ნ'),
(0x1C9D, 'M', 'ო'),
(0x1C9E, 'M', 'პ'),
(0x1C9F, 'M', 'ჟ'),
(0x1CA0, 'M', 'რ'),
(0x1CA1, 'M', 'ს'),
(0x1CA2, 'M', 'ტ'),
(0x1CA3, 'M', 'უ'),
(0x1CA4, 'M', 'ფ'),
(0x1CA5, 'M', 'ქ'),
(0x1CA6, 'M', 'ღ'),
(0x1CA7, 'M', 'ყ'),
(0x1CA8, 'M', 'შ'),
(0x1CA9, 'M', 'ჩ'),
(0x1CAA, 'M', 'ც'),
(0x1CAB, 'M', 'ძ'),
(0x1CAC, 'M', 'წ'),
(0x1CAD, 'M', 'ჭ'),
(0x1CAE, 'M', 'ხ'),
(0x1CAF, 'M', 'ჯ'),
(0x1CB0, 'M', 'ჰ'),
(0x1CB1, 'M', 'ჱ'),
(0x1CB2, 'M', 'ჲ'),
(0x1CB3, 'M', 'ჳ'),
(0x1CB4, 'M', 'ჴ'),
(0x1CB5, 'M', 'ჵ'),
(0x1CB6, 'M', 'ჶ'),
(0x1CB7, 'M', 'ჷ'),
(0x1CB8, 'M', 'ჸ'),
(0x1CB9, 'M', 'ჹ'),
(0x1CBA, 'M', 'ჺ'),
(0x1CBB, 'X'),
(0x1CBD, 'M', 'ჽ'),
(0x1CBE, 'M', 'ჾ'),
(0x1CBF, 'M', 'ჿ'),
(0x1CC0, 'V'),
(0x1CC8, 'X'),
(0x1CD0, 'V'),
(0x1CFB, 'X'),
(0x1D00, 'V'),
(0x1D2C, 'M', 'a'),
(0x1D2D, 'M', 'æ'),
(0x1D2E, 'M', 'b'),
(0x1D2F, 'V'),
(0x1D30, 'M', 'd'),
(0x1D31, 'M', 'e'),
(0x1D32, 'M', 'ǝ'),
(0x1D33, 'M', 'g'),
(0x1D34, 'M', 'h'),
(0x1D35, 'M', 'i'),
(0x1D36, 'M', 'j'),
(0x1D37, 'M', 'k'),
(0x1D38, 'M', 'l'),
(0x1D39, 'M', 'm'),
(0x1D3A, 'M', 'n'),
(0x1D3B, 'V'),
(0x1D3C, 'M', 'o'),
(0x1D3D, 'M', 'ȣ'),
(0x1D3E, 'M', 'p'),
(0x1D3F, 'M', 'r'),
(0x1D40, 'M', 't'),
(0x1D41, 'M', 'u'),
(0x1D42, 'M', 'w'),
(0x1D43, 'M', 'a'),
(0x1D44, 'M', 'ɐ'),
(0x1D45, 'M', 'ɑ'),
(0x1D46, 'M', 'ᴂ'),
(0x1D47, 'M', 'b'),
(0x1D48, 'M', 'd'),
(0x1D49, 'M', 'e'),
(0x1D4A, 'M', 'ə'),
(0x1D4B, 'M', 'ɛ'),
(0x1D4C, 'M', 'ɜ'),
(0x1D4D, 'M', 'g'),
(0x1D4E, 'V'),
(0x1D4F, 'M', 'k'),
(0x1D50, 'M', 'm'),
(0x1D51, 'M', 'ŋ'),
(0x1D52, 'M', 'o'),
(0x1D53, 'M', 'ɔ'),
]
def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D54, 'M', 'ᴖ'),
(0x1D55, 'M', 'ᴗ'),
(0x1D56, 'M', 'p'),
(0x1D57, 'M', 't'),
(0x1D58, 'M', 'u'),
(0x1D59, 'M', 'ᴝ'),
(0x1D5A, 'M', 'ɯ'),
(0x1D5B, 'M', 'v'),
(0x1D5C, 'M', 'ᴥ'),
(0x1D5D, 'M', 'β'),
(0x1D5E, 'M', 'γ'),
(0x1D5F, 'M', 'δ'),
(0x1D60, 'M', 'φ'),
(0x1D61, 'M', 'χ'),
(0x1D62, 'M', 'i'),
(0x1D63, 'M', 'r'),
(0x1D64, 'M', 'u'),
(0x1D65, 'M', 'v'),
(0x1D66, 'M', 'β'),
(0x1D67, 'M', 'γ'),
(0x1D68, 'M', 'ρ'),
(0x1D69, 'M', 'φ'),
(0x1D6A, 'M', 'χ'),
(0x1D6B, 'V'),
(0x1D78, 'M', 'н'),
(0x1D79, 'V'),
(0x1D9B, 'M', 'ɒ'),
(0x1D9C, 'M', 'c'),
(0x1D9D, 'M', 'ɕ'),
(0x1D9E, 'M', 'ð'),
(0x1D9F, 'M', 'ɜ'),
(0x1DA0, 'M', 'f'),
(0x1DA1, 'M', 'ɟ'),
(0x1DA2, 'M', 'ɡ'),
(0x1DA3, 'M', 'ɥ'),
(0x1DA4, 'M', 'ɨ'),
(0x1DA5, 'M', 'ɩ'),
(0x1DA6, 'M', 'ɪ'),
(0x1DA7, 'M', 'ᵻ'),
(0x1DA8, 'M', 'ʝ'),
(0x1DA9, 'M', 'ɭ'),
(0x1DAA, 'M', 'ᶅ'),
(0x1DAB, 'M', 'ʟ'),
(0x1DAC, 'M', 'ɱ'),
(0x1DAD, 'M', 'ɰ'),
(0x1DAE, 'M', 'ɲ'),
(0x1DAF, 'M', 'ɳ'),
(0x1DB0, 'M', 'ɴ'),
(0x1DB1, 'M', 'ɵ'),
(0x1DB2, 'M', 'ɸ'),
(0x1DB3, 'M', 'ʂ'),
(0x1DB4, 'M', 'ʃ'),
(0x1DB5, 'M', 'ƫ'),
(0x1DB6, 'M', 'ʉ'),
(0x1DB7, 'M', 'ʊ'),
(0x1DB8, 'M', 'ᴜ'),
(0x1DB9, 'M', 'ʋ'),
(0x1DBA, 'M', 'ʌ'),
(0x1DBB, 'M', 'z'),
(0x1DBC, 'M', 'ʐ'),
(0x1DBD, 'M', 'ʑ'),
(0x1DBE, 'M', 'ʒ'),
(0x1DBF, 'M', 'θ'),
(0x1DC0, 'V'),
(0x1E00, 'M', 'ḁ'),
(0x1E01, 'V'),
(0x1E02, 'M', 'ḃ'),
(0x1E03, 'V'),
(0x1E04, 'M', 'ḅ'),
(0x1E05, 'V'),
(0x1E06, 'M', 'ḇ'),
(0x1E07, 'V'),
(0x1E08, 'M', 'ḉ'),
(0x1E09, 'V'),
(0x1E0A, 'M', 'ḋ'),
(0x1E0B, 'V'),
(0x1E0C, 'M', 'ḍ'),
(0x1E0D, 'V'),
(0x1E0E, 'M', 'ḏ'),
(0x1E0F, 'V'),
(0x1E10, 'M', 'ḑ'),
(0x1E11, 'V'),
(0x1E12, 'M', 'ḓ'),
(0x1E13, 'V'),
(0x1E14, 'M', 'ḕ'),
(0x1E15, 'V'),
(0x1E16, 'M', 'ḗ'),
(0x1E17, 'V'),
(0x1E18, 'M', 'ḙ'),
(0x1E19, 'V'),
(0x1E1A, 'M', 'ḛ'),
(0x1E1B, 'V'),
(0x1E1C, 'M', 'ḝ'),
(0x1E1D, 'V'),
(0x1E1E, 'M', 'ḟ'),
(0x1E1F, 'V'),
(0x1E20, 'M', 'ḡ'),
(0x1E21, 'V'),
(0x1E22, 'M', 'ḣ'),
(0x1E23, 'V'),
]
def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1E24, 'M', 'ḥ'),
(0x1E25, 'V'),
(0x1E26, 'M', 'ḧ'),
(0x1E27, 'V'),
(0x1E28, 'M', 'ḩ'),
(0x1E29, 'V'),
(0x1E2A, 'M', 'ḫ'),
(0x1E2B, 'V'),
(0x1E2C, 'M', 'ḭ'),
(0x1E2D, 'V'),
(0x1E2E, 'M', 'ḯ'),
(0x1E2F, 'V'),
(0x1E30, 'M', 'ḱ'),
(0x1E31, 'V'),
(0x1E32, 'M', 'ḳ'),
(0x1E33, 'V'),
(0x1E34, 'M', 'ḵ'),
(0x1E35, 'V'),
(0x1E36, 'M', 'ḷ'),
(0x1E37, 'V'),
(0x1E38, 'M', 'ḹ'),
(0x1E39, 'V'),
(0x1E3A, 'M', 'ḻ'),
(0x1E3B, 'V'),
(0x1E3C, 'M', 'ḽ'),
(0x1E3D, 'V'),
(0x1E3E, 'M', 'ḿ'),
(0x1E3F, 'V'),
(0x1E40, 'M', 'ṁ'),
(0x1E41, 'V'),
(0x1E42, 'M', 'ṃ'),
(0x1E43, 'V'),
(0x1E44, 'M', 'ṅ'),
(0x1E45, 'V'),
(0x1E46, 'M', 'ṇ'),
(0x1E47, 'V'),
(0x1E48, 'M', 'ṉ'),
(0x1E49, 'V'),
(0x1E4A, 'M', 'ṋ'),
(0x1E4B, 'V'),
(0x1E4C, 'M', 'ṍ'),
(0x1E4D, 'V'),
(0x1E4E, 'M', 'ṏ'),
(0x1E4F, 'V'),
(0x1E50, 'M', 'ṑ'),
(0x1E51, 'V'),
(0x1E52, 'M', 'ṓ'),
(0x1E53, 'V'),
(0x1E54, 'M', 'ṕ'),
(0x1E55, 'V'),
(0x1E56, 'M', 'ṗ'),
(0x1E57, 'V'),
(0x1E58, 'M', 'ṙ'),
(0x1E59, 'V'),
(0x1E5A, 'M', 'ṛ'),
(0x1E5B, 'V'),
(0x1E5C, 'M', 'ṝ'),
(0x1E5D, 'V'),
(0x1E5E, 'M', 'ṟ'),
(0x1E5F, 'V'),
(0x1E60, 'M', 'ṡ'),
(0x1E61, 'V'),
(0x1E62, 'M', 'ṣ'),
(0x1E63, 'V'),
(0x1E64, 'M', 'ṥ'),
(0x1E65, 'V'),
(0x1E66, 'M', 'ṧ'),
(0x1E67, 'V'),
(0x1E68, 'M', 'ṩ'),
(0x1E69, 'V'),
(0x1E6A, 'M', 'ṫ'),
(0x1E6B, 'V'),
(0x1E6C, 'M', 'ṭ'),
(0x1E6D, 'V'),
(0x1E6E, 'M', 'ṯ'),
(0x1E6F, 'V'),
(0x1E70, 'M', 'ṱ'),
(0x1E71, 'V'),
(0x1E72, 'M', 'ṳ'),
(0x1E73, 'V'),
(0x1E74, 'M', 'ṵ'),
(0x1E75, 'V'),
(0x1E76, 'M', 'ṷ'),
(0x1E77, 'V'),
(0x1E78, 'M', 'ṹ'),
(0x1E79, 'V'),
(0x1E7A, 'M', 'ṻ'),
(0x1E7B, 'V'),
(0x1E7C, 'M', 'ṽ'),
(0x1E7D, 'V'),
(0x1E7E, 'M', 'ṿ'),
(0x1E7F, 'V'),
(0x1E80, 'M', 'ẁ'),
(0x1E81, 'V'),
(0x1E82, 'M', 'ẃ'),
(0x1E83, 'V'),
(0x1E84, 'M', 'ẅ'),
(0x1E85, 'V'),
(0x1E86, 'M', 'ẇ'),
(0x1E87, 'V'),
]
def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1E88, 'M', 'ẉ'),
(0x1E89, 'V'),
(0x1E8A, 'M', 'ẋ'),
(0x1E8B, 'V'),
(0x1E8C, 'M', 'ẍ'),
(0x1E8D, 'V'),
(0x1E8E, 'M', 'ẏ'),
(0x1E8F, 'V'),
(0x1E90, 'M', 'ẑ'),
(0x1E91, 'V'),
(0x1E92, 'M', 'ẓ'),
(0x1E93, 'V'),
(0x1E94, 'M', 'ẕ'),
(0x1E95, 'V'),
(0x1E9A, 'M', 'aʾ'),
(0x1E9B, 'M', 'ṡ'),
(0x1E9C, 'V'),
(0x1E9E, 'M', 'ss'),
(0x1E9F, 'V'),
(0x1EA0, 'M', 'ạ'),
(0x1EA1, 'V'),
(0x1EA2, 'M', 'ả'),
(0x1EA3, 'V'),
(0x1EA4, 'M', 'ấ'),
(0x1EA5, 'V'),
(0x1EA6, 'M', 'ầ'),
(0x1EA7, 'V'),
(0x1EA8, 'M', 'ẩ'),
(0x1EA9, 'V'),
(0x1EAA, 'M', 'ẫ'),
(0x1EAB, 'V'),
(0x1EAC, 'M', 'ậ'),
(0x1EAD, 'V'),
(0x1EAE, 'M', 'ắ'),
(0x1EAF, 'V'),
(0x1EB0, 'M', 'ằ'),
(0x1EB1, 'V'),
(0x1EB2, 'M', 'ẳ'),
(0x1EB3, 'V'),
(0x1EB4, 'M', 'ẵ'),
(0x1EB5, 'V'),
(0x1EB6, 'M', 'ặ'),
(0x1EB7, 'V'),
(0x1EB8, 'M', 'ẹ'),
(0x1EB9, 'V'),
(0x1EBA, 'M', 'ẻ'),
(0x1EBB, 'V'),
(0x1EBC, 'M', 'ẽ'),
(0x1EBD, 'V'),
(0x1EBE, 'M', 'ế'),
(0x1EBF, 'V'),
(0x1EC0, 'M', 'ề'),
(0x1EC1, 'V'),
(0x1EC2, 'M', 'ể'),
(0x1EC3, 'V'),
(0x1EC4, 'M', 'ễ'),
(0x1EC5, 'V'),
(0x1EC6, 'M', 'ệ'),
(0x1EC7, 'V'),
(0x1EC8, 'M', 'ỉ'),
(0x1EC9, 'V'),
(0x1ECA, 'M', 'ị'),
(0x1ECB, 'V'),
(0x1ECC, 'M', 'ọ'),
(0x1ECD, 'V'),
(0x1ECE, 'M', 'ỏ'),
(0x1ECF, 'V'),
(0x1ED0, 'M', 'ố'),
(0x1ED1, 'V'),
(0x1ED2, 'M', 'ồ'),
(0x1ED3, 'V'),
(0x1ED4, 'M', 'ổ'),
(0x1ED5, 'V'),
(0x1ED6, 'M', 'ỗ'),
(0x1ED7, 'V'),
(0x1ED8, 'M', 'ộ'),
(0x1ED9, 'V'),
(0x1EDA, 'M', 'ớ'),
(0x1EDB, 'V'),
(0x1EDC, 'M', 'ờ'),
(0x1EDD, 'V'),
(0x1EDE, 'M', 'ở'),
(0x1EDF, 'V'),
(0x1EE0, 'M', 'ỡ'),
(0x1EE1, 'V'),
(0x1EE2, 'M', 'ợ'),
(0x1EE3, 'V'),
(0x1EE4, 'M', 'ụ'),
(0x1EE5, 'V'),
(0x1EE6, 'M', 'ủ'),
(0x1EE7, 'V'),
(0x1EE8, 'M', 'ứ'),
(0x1EE9, 'V'),
(0x1EEA, 'M', 'ừ'),
(0x1EEB, 'V'),
(0x1EEC, 'M', 'ử'),
(0x1EED, 'V'),
(0x1EEE, 'M', 'ữ'),
(0x1EEF, 'V'),
(0x1EF0, 'M', 'ự'),
]
def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1EF1, 'V'),
(0x1EF2, 'M', 'ỳ'),
(0x1EF3, 'V'),
(0x1EF4, 'M', 'ỵ'),
(0x1EF5, 'V'),
(0x1EF6, 'M', 'ỷ'),
(0x1EF7, 'V'),
(0x1EF8, 'M', 'ỹ'),
(0x1EF9, 'V'),
(0x1EFA, 'M', 'ỻ'),
(0x1EFB, 'V'),
(0x1EFC, 'M', 'ỽ'),
(0x1EFD, 'V'),
(0x1EFE, 'M', 'ỿ'),
(0x1EFF, 'V'),
(0x1F08, 'M', 'ἀ'),
(0x1F09, 'M', 'ἁ'),
(0x1F0A, 'M', 'ἂ'),
(0x1F0B, 'M', 'ἃ'),
(0x1F0C, 'M', 'ἄ'),
(0x1F0D, 'M', 'ἅ'),
(0x1F0E, 'M', 'ἆ'),
(0x1F0F, 'M', 'ἇ'),
(0x1F10, 'V'),
(0x1F16, 'X'),
(0x1F18, 'M', 'ἐ'),
(0x1F19, 'M', 'ἑ'),
(0x1F1A, 'M', 'ἒ'),
(0x1F1B, 'M', 'ἓ'),
(0x1F1C, 'M', 'ἔ'),
(0x1F1D, 'M', 'ἕ'),
(0x1F1E, 'X'),
(0x1F20, 'V'),
(0x1F28, 'M', 'ἠ'),
(0x1F29, 'M', 'ἡ'),
(0x1F2A, 'M', 'ἢ'),
(0x1F2B, 'M', 'ἣ'),
(0x1F2C, 'M', 'ἤ'),
(0x1F2D, 'M', 'ἥ'),
(0x1F2E, 'M', 'ἦ'),
(0x1F2F, 'M', 'ἧ'),
(0x1F30, 'V'),
(0x1F38, 'M', 'ἰ'),
(0x1F39, 'M', 'ἱ'),
(0x1F3A, 'M', 'ἲ'),
(0x1F3B, 'M', 'ἳ'),
(0x1F3C, 'M', 'ἴ'),
(0x1F3D, 'M', 'ἵ'),
(0x1F3E, 'M', 'ἶ'),
(0x1F3F, 'M', 'ἷ'),
(0x1F40, 'V'),
(0x1F46, 'X'),
(0x1F48, 'M', 'ὀ'),
(0x1F49, 'M', 'ὁ'),
(0x1F4A, 'M', 'ὂ'),
(0x1F4B, 'M', 'ὃ'),
(0x1F4C, 'M', 'ὄ'),
(0x1F4D, 'M', 'ὅ'),
(0x1F4E, 'X'),
(0x1F50, 'V'),
(0x1F58, 'X'),
(0x1F59, 'M', 'ὑ'),
(0x1F5A, 'X'),
(0x1F5B, 'M', 'ὓ'),
(0x1F5C, 'X'),
(0x1F5D, 'M', 'ὕ'),
(0x1F5E, 'X'),
(0x1F5F, 'M', 'ὗ'),
(0x1F60, 'V'),
(0x1F68, 'M', 'ὠ'),
(0x1F69, 'M', 'ὡ'),
(0x1F6A, 'M', 'ὢ'),
(0x1F6B, 'M', 'ὣ'),
(0x1F6C, 'M', 'ὤ'),
(0x1F6D, 'M', 'ὥ'),
(0x1F6E, 'M', 'ὦ'),
(0x1F6F, 'M', 'ὧ'),
(0x1F70, 'V'),
(0x1F71, 'M', 'ά'),
(0x1F72, 'V'),
(0x1F73, 'M', 'έ'),
(0x1F74, 'V'),
(0x1F75, 'M', 'ή'),
(0x1F76, 'V'),
(0x1F77, 'M', 'ί'),
(0x1F78, 'V'),
(0x1F79, 'M', 'ό'),
(0x1F7A, 'V'),
(0x1F7B, 'M', 'ύ'),
(0x1F7C, 'V'),
(0x1F7D, 'M', 'ώ'),
(0x1F7E, 'X'),
(0x1F80, 'M', 'ἀι'),
(0x1F81, 'M', 'ἁι'),
(0x1F82, 'M', 'ἂι'),
(0x1F83, 'M', 'ἃι'),
(0x1F84, 'M', 'ἄι'),
(0x1F85, 'M', 'ἅι'),
(0x1F86, 'M', 'ἆι'),
(0x1F87, 'M', 'ἇι'),
]
def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1F88, 'M', 'ἀι'),
(0x1F89, 'M', 'ἁι'),
(0x1F8A, 'M', 'ἂι'),
(0x1F8B, 'M', 'ἃι'),
(0x1F8C, 'M', 'ἄι'),
(0x1F8D, 'M', 'ἅι'),
(0x1F8E, 'M', 'ἆι'),
(0x1F8F, 'M', 'ἇι'),
(0x1F90, 'M', 'ἠι'),
(0x1F91, 'M', 'ἡι'),
(0x1F92, 'M', 'ἢι'),
(0x1F93, 'M', 'ἣι'),
(0x1F94, 'M', 'ἤι'),
(0x1F95, 'M', 'ἥι'),
(0x1F96, 'M', 'ἦι'),
(0x1F97, 'M', 'ἧι'),
(0x1F98, 'M', 'ἠι'),
(0x1F99, 'M', 'ἡι'),
(0x1F9A, 'M', 'ἢι'),
(0x1F9B, 'M', 'ἣι'),
(0x1F9C, 'M', 'ἤι'),
(0x1F9D, 'M', 'ἥι'),
(0x1F9E, 'M', 'ἦι'),
(0x1F9F, 'M', 'ἧι'),
(0x1FA0, 'M', 'ὠι'),
(0x1FA1, 'M', 'ὡι'),
(0x1FA2, 'M', 'ὢι'),
(0x1FA3, 'M', 'ὣι'),
(0x1FA4, 'M', 'ὤι'),
(0x1FA5, 'M', 'ὥι'),
(0x1FA6, 'M', 'ὦι'),
(0x1FA7, 'M', 'ὧι'),
(0x1FA8, 'M', 'ὠι'),
(0x1FA9, 'M', 'ὡι'),
(0x1FAA, 'M', 'ὢι'),
(0x1FAB, 'M', 'ὣι'),
(0x1FAC, 'M', 'ὤι'),
(0x1FAD, 'M', 'ὥι'),
(0x1FAE, 'M', 'ὦι'),
(0x1FAF, 'M', 'ὧι'),
(0x1FB0, 'V'),
(0x1FB2, 'M', 'ὰι'),
(0x1FB3, 'M', 'αι'),
(0x1FB4, 'M', 'άι'),
(0x1FB5, 'X'),
(0x1FB6, 'V'),
(0x1FB7, 'M', 'ᾶι'),
(0x1FB8, 'M', 'ᾰ'),
(0x1FB9, 'M', 'ᾱ'),
(0x1FBA, 'M', 'ὰ'),
(0x1FBB, 'M', 'ά'),
(0x1FBC, 'M', 'αι'),
(0x1FBD, '3', ' ̓'),
(0x1FBE, 'M', 'ι'),
(0x1FBF, '3', ' ̓'),
(0x1FC0, '3', ' ͂'),
(0x1FC1, '3', ' ̈͂'),
(0x1FC2, 'M', 'ὴι'),
(0x1FC3, 'M', 'ηι'),
(0x1FC4, 'M', 'ήι'),
(0x1FC5, 'X'),
(0x1FC6, 'V'),
(0x1FC7, 'M', 'ῆι'),
(0x1FC8, 'M', 'ὲ'),
(0x1FC9, 'M', 'έ'),
(0x1FCA, 'M', 'ὴ'),
(0x1FCB, 'M', 'ή'),
(0x1FCC, 'M', 'ηι'),
(0x1FCD, '3', ' ̓̀'),
(0x1FCE, '3', ' ̓́'),
(0x1FCF, '3', ' ̓͂'),
(0x1FD0, 'V'),
(0x1FD3, 'M', 'ΐ'),
(0x1FD4, 'X'),
(0x1FD6, 'V'),
(0x1FD8, 'M', 'ῐ'),
(0x1FD9, 'M', 'ῑ'),
(0x1FDA, 'M', 'ὶ'),
(0x1FDB, 'M', 'ί'),
(0x1FDC, 'X'),
(0x1FDD, '3', ' ̔̀'),
(0x1FDE, '3', ' ̔́'),
(0x1FDF, '3', ' ̔͂'),
(0x1FE0, 'V'),
(0x1FE3, 'M', 'ΰ'),
(0x1FE4, 'V'),
(0x1FE8, 'M', 'ῠ'),
(0x1FE9, 'M', 'ῡ'),
(0x1FEA, 'M', 'ὺ'),
(0x1FEB, 'M', 'ύ'),
(0x1FEC, 'M', 'ῥ'),
(0x1FED, '3', ' ̈̀'),
(0x1FEE, '3', ' ̈́'),
(0x1FEF, '3', '`'),
(0x1FF0, 'X'),
(0x1FF2, 'M', 'ὼι'),
(0x1FF3, 'M', 'ωι'),
(0x1FF4, 'M', 'ώι'),
(0x1FF5, 'X'),
(0x1FF6, 'V'),
]
def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1FF7, 'M', 'ῶι'),
(0x1FF8, 'M', 'ὸ'),
(0x1FF9, 'M', 'ό'),
(0x1FFA, 'M', 'ὼ'),
(0x1FFB, 'M', 'ώ'),
(0x1FFC, 'M', 'ωι'),
(0x1FFD, '3', ' ́'),
(0x1FFE, '3', ' ̔'),
(0x1FFF, 'X'),
(0x2000, '3', ' '),
(0x200B, 'I'),
(0x200C, 'D', ''),
(0x200E, 'X'),
(0x2010, 'V'),
(0x2011, 'M', '‐'),
(0x2012, 'V'),
(0x2017, '3', ' ̳'),
(0x2018, 'V'),
(0x2024, 'X'),
(0x2027, 'V'),
(0x2028, 'X'),
(0x202F, '3', ' '),
(0x2030, 'V'),
(0x2033, 'M', '′′'),
(0x2034, 'M', '′′′'),
(0x2035, 'V'),
(0x2036, 'M', '‵‵'),
(0x2037, 'M', '‵‵‵'),
(0x2038, 'V'),
(0x203C, '3', '!!'),
(0x203D, 'V'),
(0x203E, '3', ' ̅'),
(0x203F, 'V'),
(0x2047, '3', '??'),
(0x2048, '3', '?!'),
(0x2049, '3', '!?'),
(0x204A, 'V'),
(0x2057, 'M', '′′′′'),
(0x2058, 'V'),
(0x205F, '3', ' '),
(0x2060, 'I'),
(0x2061, 'X'),
(0x2064, 'I'),
(0x2065, 'X'),
(0x2070, 'M', '0'),
(0x2071, 'M', 'i'),
(0x2072, 'X'),
(0x2074, 'M', '4'),
(0x2075, 'M', '5'),
(0x2076, 'M', '6'),
(0x2077, 'M', '7'),
(0x2078, 'M', '8'),
(0x2079, 'M', '9'),
(0x207A, '3', '+'),
(0x207B, 'M', '−'),
(0x207C, '3', '='),
(0x207D, '3', '('),
(0x207E, '3', ')'),
(0x207F, 'M', 'n'),
(0x2080, 'M', '0'),
(0x2081, 'M', '1'),
(0x2082, 'M', '2'),
(0x2083, 'M', '3'),
(0x2084, 'M', '4'),
(0x2085, 'M', '5'),
(0x2086, 'M', '6'),
(0x2087, 'M', '7'),
(0x2088, 'M', '8'),
(0x2089, 'M', '9'),
(0x208A, '3', '+'),
(0x208B, 'M', '−'),
(0x208C, '3', '='),
(0x208D, '3', '('),
(0x208E, '3', ')'),
(0x208F, 'X'),
(0x2090, 'M', 'a'),
(0x2091, 'M', 'e'),
(0x2092, 'M', 'o'),
(0x2093, 'M', 'x'),
(0x2094, 'M', 'ə'),
(0x2095, 'M', 'h'),
(0x2096, 'M', 'k'),
(0x2097, 'M', 'l'),
(0x2098, 'M', 'm'),
(0x2099, 'M', 'n'),
(0x209A, 'M', 'p'),
(0x209B, 'M', 's'),
(0x209C, 'M', 't'),
(0x209D, 'X'),
(0x20A0, 'V'),
(0x20A8, 'M', 'rs'),
(0x20A9, 'V'),
(0x20C1, 'X'),
(0x20D0, 'V'),
(0x20F1, 'X'),
(0x2100, '3', 'a/c'),
(0x2101, '3', 'a/s'),
(0x2102, 'M', 'c'),
(0x2103, 'M', '°c'),
(0x2104, 'V'),
]
def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2105, '3', 'c/o'),
(0x2106, '3', 'c/u'),
(0x2107, 'M', 'ɛ'),
(0x2108, 'V'),
(0x2109, 'M', '°f'),
(0x210A, 'M', 'g'),
(0x210B, 'M', 'h'),
(0x210F, 'M', 'ħ'),
(0x2110, 'M', 'i'),
(0x2112, 'M', 'l'),
(0x2114, 'V'),
(0x2115, 'M', 'n'),
(0x2116, 'M', 'no'),
(0x2117, 'V'),
(0x2119, 'M', 'p'),
(0x211A, 'M', 'q'),
(0x211B, 'M', 'r'),
(0x211E, 'V'),
(0x2120, 'M', 'sm'),
(0x2121, 'M', 'tel'),
(0x2122, 'M', 'tm'),
(0x2123, 'V'),
(0x2124, 'M', 'z'),
(0x2125, 'V'),
(0x2126, 'M', 'ω'),
(0x2127, 'V'),
(0x2128, 'M', 'z'),
(0x2129, 'V'),
(0x212A, 'M', 'k'),
(0x212B, 'M', 'å'),
(0x212C, 'M', 'b'),
(0x212D, 'M', 'c'),
(0x212E, 'V'),
(0x212F, 'M', 'e'),
(0x2131, 'M', 'f'),
(0x2132, 'X'),
(0x2133, 'M', 'm'),
(0x2134, 'M', 'o'),
(0x2135, 'M', 'א'),
(0x2136, 'M', 'ב'),
(0x2137, 'M', 'ג'),
(0x2138, 'M', 'ד'),
(0x2139, 'M', 'i'),
(0x213A, 'V'),
(0x213B, 'M', 'fax'),
(0x213C, 'M', 'π'),
(0x213D, 'M', 'γ'),
(0x213F, 'M', 'π'),
(0x2140, 'M', '∑'),
(0x2141, 'V'),
(0x2145, 'M', 'd'),
(0x2147, 'M', 'e'),
(0x2148, 'M', 'i'),
(0x2149, 'M', 'j'),
(0x214A, 'V'),
(0x2150, 'M', '1⁄7'),
(0x2151, 'M', '1⁄9'),
(0x2152, 'M', '1⁄10'),
(0x2153, 'M', '1⁄3'),
(0x2154, 'M', '2⁄3'),
(0x2155, 'M', '1⁄5'),
(0x2156, 'M', '2⁄5'),
(0x2157, 'M', '3⁄5'),
(0x2158, 'M', '4⁄5'),
(0x2159, 'M', '1⁄6'),
(0x215A, 'M', '5⁄6'),
(0x215B, 'M', '1⁄8'),
(0x215C, 'M', '3⁄8'),
(0x215D, 'M', '5⁄8'),
(0x215E, 'M', '7⁄8'),
(0x215F, 'M', '1⁄'),
(0x2160, 'M', 'i'),
(0x2161, 'M', 'ii'),
(0x2162, 'M', 'iii'),
(0x2163, 'M', 'iv'),
(0x2164, 'M', 'v'),
(0x2165, 'M', 'vi'),
(0x2166, 'M', 'vii'),
(0x2167, 'M', 'viii'),
(0x2168, 'M', 'ix'),
(0x2169, 'M', 'x'),
(0x216A, 'M', 'xi'),
(0x216B, 'M', 'xii'),
(0x216C, 'M', 'l'),
(0x216D, 'M', 'c'),
(0x216E, 'M', 'd'),
(0x216F, 'M', 'm'),
(0x2170, 'M', 'i'),
(0x2171, 'M', 'ii'),
(0x2172, 'M', 'iii'),
(0x2173, 'M', 'iv'),
(0x2174, 'M', 'v'),
(0x2175, 'M', 'vi'),
(0x2176, 'M', 'vii'),
(0x2177, 'M', 'viii'),
(0x2178, 'M', 'ix'),
(0x2179, 'M', 'x'),
(0x217A, 'M', 'xi'),
(0x217B, 'M', 'xii'),
(0x217C, 'M', 'l'),
]
def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x217D, 'M', 'c'),
(0x217E, 'M', 'd'),
(0x217F, 'M', 'm'),
(0x2180, 'V'),
(0x2183, 'X'),
(0x2184, 'V'),
(0x2189, 'M', '0⁄3'),
(0x218A, 'V'),
(0x218C, 'X'),
(0x2190, 'V'),
(0x222C, 'M', '∫∫'),
(0x222D, 'M', '∫∫∫'),
(0x222E, 'V'),
(0x222F, 'M', '∮∮'),
(0x2230, 'M', '∮∮∮'),
(0x2231, 'V'),
(0x2260, '3'),
(0x2261, 'V'),
(0x226E, '3'),
(0x2270, 'V'),
(0x2329, 'M', '〈'),
(0x232A, 'M', '〉'),
(0x232B, 'V'),
(0x2427, 'X'),
(0x2440, 'V'),
(0x244B, 'X'),
(0x2460, 'M', '1'),
(0x2461, 'M', '2'),
(0x2462, 'M', '3'),
(0x2463, 'M', '4'),
(0x2464, 'M', '5'),
(0x2465, 'M', '6'),
(0x2466, 'M', '7'),
(0x2467, 'M', '8'),
(0x2468, 'M', '9'),
(0x2469, 'M', '10'),
(0x246A, 'M', '11'),
(0x246B, 'M', '12'),
(0x246C, 'M', '13'),
(0x246D, 'M', '14'),
(0x246E, 'M', '15'),
(0x246F, 'M', '16'),
(0x2470, 'M', '17'),
(0x2471, 'M', '18'),
(0x2472, 'M', '19'),
(0x2473, 'M', '20'),
(0x2474, '3', '(1)'),
(0x2475, '3', '(2)'),
(0x2476, '3', '(3)'),
(0x2477, '3', '(4)'),
(0x2478, '3', '(5)'),
(0x2479, '3', '(6)'),
(0x247A, '3', '(7)'),
(0x247B, '3', '(8)'),
(0x247C, '3', '(9)'),
(0x247D, '3', '(10)'),
(0x247E, '3', '(11)'),
(0x247F, '3', '(12)'),
(0x2480, '3', '(13)'),
(0x2481, '3', '(14)'),
(0x2482, '3', '(15)'),
(0x2483, '3', '(16)'),
(0x2484, '3', '(17)'),
(0x2485, '3', '(18)'),
(0x2486, '3', '(19)'),
(0x2487, '3', '(20)'),
(0x2488, 'X'),
(0x249C, '3', '(a)'),
(0x249D, '3', '(b)'),
(0x249E, '3', '(c)'),
(0x249F, '3', '(d)'),
(0x24A0, '3', '(e)'),
(0x24A1, '3', '(f)'),
(0x24A2, '3', '(g)'),
(0x24A3, '3', '(h)'),
(0x24A4, '3', '(i)'),
(0x24A5, '3', '(j)'),
(0x24A6, '3', '(k)'),
(0x24A7, '3', '(l)'),
(0x24A8, '3', '(m)'),
(0x24A9, '3', '(n)'),
(0x24AA, '3', '(o)'),
(0x24AB, '3', '(p)'),
(0x24AC, '3', '(q)'),
(0x24AD, '3', '(r)'),
(0x24AE, '3', '(s)'),
(0x24AF, '3', '(t)'),
(0x24B0, '3', '(u)'),
(0x24B1, '3', '(v)'),
(0x24B2, '3', '(w)'),
(0x24B3, '3', '(x)'),
(0x24B4, '3', '(y)'),
(0x24B5, '3', '(z)'),
(0x24B6, 'M', 'a'),
(0x24B7, 'M', 'b'),
(0x24B8, 'M', 'c'),
(0x24B9, 'M', 'd'),
(0x24BA, 'M', 'e'),
(0x24BB, 'M', 'f'),
(0x24BC, 'M', 'g'),
]
def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x24BD, 'M', 'h'),
(0x24BE, 'M', 'i'),
(0x24BF, 'M', 'j'),
(0x24C0, 'M', 'k'),
(0x24C1, 'M', 'l'),
(0x24C2, 'M', 'm'),
(0x24C3, 'M', 'n'),
(0x24C4, 'M', 'o'),
(0x24C5, 'M', 'p'),
(0x24C6, 'M', 'q'),
(0x24C7, 'M', 'r'),
(0x24C8, 'M', 's'),
(0x24C9, 'M', 't'),
(0x24CA, 'M', 'u'),
(0x24CB, 'M', 'v'),
(0x24CC, 'M', 'w'),
(0x24CD, 'M', 'x'),
(0x24CE, 'M', 'y'),
(0x24CF, 'M', 'z'),
(0x24D0, 'M', 'a'),
(0x24D1, 'M', 'b'),
(0x24D2, 'M', 'c'),
(0x24D3, 'M', 'd'),
(0x24D4, 'M', 'e'),
(0x24D5, 'M', 'f'),
(0x24D6, 'M', 'g'),
(0x24D7, 'M', 'h'),
(0x24D8, 'M', 'i'),
(0x24D9, 'M', 'j'),
(0x24DA, 'M', 'k'),
(0x24DB, 'M', 'l'),
(0x24DC, 'M', 'm'),
(0x24DD, 'M', 'n'),
(0x24DE, 'M', 'o'),
(0x24DF, 'M', 'p'),
(0x24E0, 'M', 'q'),
(0x24E1, 'M', 'r'),
(0x24E2, 'M', 's'),
(0x24E3, 'M', 't'),
(0x24E4, 'M', 'u'),
(0x24E5, 'M', 'v'),
(0x24E6, 'M', 'w'),
(0x24E7, 'M', 'x'),
(0x24E8, 'M', 'y'),
(0x24E9, 'M', 'z'),
(0x24EA, 'M', '0'),
(0x24EB, 'V'),
(0x2A0C, 'M', '∫∫∫∫'),
(0x2A0D, 'V'),
(0x2A74, '3', '::='),
(0x2A75, '3', '=='),
(0x2A76, '3', '==='),
(0x2A77, 'V'),
(0x2ADC, 'M', '⫝̸'),
(0x2ADD, 'V'),
(0x2B74, 'X'),
(0x2B76, 'V'),
(0x2B96, 'X'),
(0x2B97, 'V'),
(0x2C00, 'M', 'ⰰ'),
(0x2C01, 'M', 'ⰱ'),
(0x2C02, 'M', 'ⰲ'),
(0x2C03, 'M', 'ⰳ'),
(0x2C04, 'M', 'ⰴ'),
(0x2C05, 'M', 'ⰵ'),
(0x2C06, 'M', 'ⰶ'),
(0x2C07, 'M', 'ⰷ'),
(0x2C08, 'M', 'ⰸ'),
(0x2C09, 'M', 'ⰹ'),
(0x2C0A, 'M', 'ⰺ'),
(0x2C0B, 'M', 'ⰻ'),
(0x2C0C, 'M', 'ⰼ'),
(0x2C0D, 'M', 'ⰽ'),
(0x2C0E, 'M', 'ⰾ'),
(0x2C0F, 'M', 'ⰿ'),
(0x2C10, 'M', 'ⱀ'),
(0x2C11, 'M', 'ⱁ'),
(0x2C12, 'M', 'ⱂ'),
(0x2C13, 'M', 'ⱃ'),
(0x2C14, 'M', 'ⱄ'),
(0x2C15, 'M', 'ⱅ'),
(0x2C16, 'M', 'ⱆ'),
(0x2C17, 'M', 'ⱇ'),
(0x2C18, 'M', 'ⱈ'),
(0x2C19, 'M', 'ⱉ'),
(0x2C1A, 'M', 'ⱊ'),
(0x2C1B, 'M', 'ⱋ'),
(0x2C1C, 'M', 'ⱌ'),
(0x2C1D, 'M', 'ⱍ'),
(0x2C1E, 'M', 'ⱎ'),
(0x2C1F, 'M', 'ⱏ'),
(0x2C20, 'M', 'ⱐ'),
(0x2C21, 'M', 'ⱑ'),
(0x2C22, 'M', 'ⱒ'),
(0x2C23, 'M', 'ⱓ'),
(0x2C24, 'M', 'ⱔ'),
(0x2C25, 'M', 'ⱕ'),
(0x2C26, 'M', 'ⱖ'),
(0x2C27, 'M', 'ⱗ'),
(0x2C28, 'M', 'ⱘ'),
]
def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2C29, 'M', 'ⱙ'),
(0x2C2A, 'M', 'ⱚ'),
(0x2C2B, 'M', 'ⱛ'),
(0x2C2C, 'M', 'ⱜ'),
(0x2C2D, 'M', 'ⱝ'),
(0x2C2E, 'M', 'ⱞ'),
(0x2C2F, 'M', 'ⱟ'),
(0x2C30, 'V'),
(0x2C60, 'M', 'ⱡ'),
(0x2C61, 'V'),
(0x2C62, 'M', 'ɫ'),
(0x2C63, 'M', 'ᵽ'),
(0x2C64, 'M', 'ɽ'),
(0x2C65, 'V'),
(0x2C67, 'M', 'ⱨ'),
(0x2C68, 'V'),
(0x2C69, 'M', 'ⱪ'),
(0x2C6A, 'V'),
(0x2C6B, 'M', 'ⱬ'),
(0x2C6C, 'V'),
(0x2C6D, 'M', 'ɑ'),
(0x2C6E, 'M', 'ɱ'),
(0x2C6F, 'M', 'ɐ'),
(0x2C70, 'M', 'ɒ'),
(0x2C71, 'V'),
(0x2C72, 'M', 'ⱳ'),
(0x2C73, 'V'),
(0x2C75, 'M', 'ⱶ'),
(0x2C76, 'V'),
(0x2C7C, 'M', 'j'),
(0x2C7D, 'M', 'v'),
(0x2C7E, 'M', 'ȿ'),
(0x2C7F, 'M', 'ɀ'),
(0x2C80, 'M', 'ⲁ'),
(0x2C81, 'V'),
(0x2C82, 'M', 'ⲃ'),
(0x2C83, 'V'),
(0x2C84, 'M', 'ⲅ'),
(0x2C85, 'V'),
(0x2C86, 'M', 'ⲇ'),
(0x2C87, 'V'),
(0x2C88, 'M', 'ⲉ'),
(0x2C89, 'V'),
(0x2C8A, 'M', 'ⲋ'),
(0x2C8B, 'V'),
(0x2C8C, 'M', 'ⲍ'),
(0x2C8D, 'V'),
(0x2C8E, 'M', 'ⲏ'),
(0x2C8F, 'V'),
(0x2C90, 'M', 'ⲑ'),
(0x2C91, 'V'),
(0x2C92, 'M', 'ⲓ'),
(0x2C93, 'V'),
(0x2C94, 'M', 'ⲕ'),
(0x2C95, 'V'),
(0x2C96, 'M', 'ⲗ'),
(0x2C97, 'V'),
(0x2C98, 'M', 'ⲙ'),
(0x2C99, 'V'),
(0x2C9A, 'M', 'ⲛ'),
(0x2C9B, 'V'),
(0x2C9C, 'M', 'ⲝ'),
(0x2C9D, 'V'),
(0x2C9E, 'M', 'ⲟ'),
(0x2C9F, 'V'),
(0x2CA0, 'M', 'ⲡ'),
(0x2CA1, 'V'),
(0x2CA2, 'M', 'ⲣ'),
(0x2CA3, 'V'),
(0x2CA4, 'M', 'ⲥ'),
(0x2CA5, 'V'),
(0x2CA6, 'M', 'ⲧ'),
(0x2CA7, 'V'),
(0x2CA8, 'M', 'ⲩ'),
(0x2CA9, 'V'),
(0x2CAA, 'M', 'ⲫ'),
(0x2CAB, 'V'),
(0x2CAC, 'M', 'ⲭ'),
(0x2CAD, 'V'),
(0x2CAE, 'M', 'ⲯ'),
(0x2CAF, 'V'),
(0x2CB0, 'M', 'ⲱ'),
(0x2CB1, 'V'),
(0x2CB2, 'M', 'ⲳ'),
(0x2CB3, 'V'),
(0x2CB4, 'M', 'ⲵ'),
(0x2CB5, 'V'),
(0x2CB6, 'M', 'ⲷ'),
(0x2CB7, 'V'),
(0x2CB8, 'M', 'ⲹ'),
(0x2CB9, 'V'),
(0x2CBA, 'M', 'ⲻ'),
(0x2CBB, 'V'),
(0x2CBC, 'M', 'ⲽ'),
(0x2CBD, 'V'),
(0x2CBE, 'M', 'ⲿ'),
(0x2CBF, 'V'),
(0x2CC0, 'M', 'ⳁ'),
(0x2CC1, 'V'),
(0x2CC2, 'M', 'ⳃ'),
]
def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2CC3, 'V'),
(0x2CC4, 'M', 'ⳅ'),
(0x2CC5, 'V'),
(0x2CC6, 'M', 'ⳇ'),
(0x2CC7, 'V'),
(0x2CC8, 'M', 'ⳉ'),
(0x2CC9, 'V'),
(0x2CCA, 'M', 'ⳋ'),
(0x2CCB, 'V'),
(0x2CCC, 'M', 'ⳍ'),
(0x2CCD, 'V'),
(0x2CCE, 'M', 'ⳏ'),
(0x2CCF, 'V'),
(0x2CD0, 'M', 'ⳑ'),
(0x2CD1, 'V'),
(0x2CD2, 'M', 'ⳓ'),
(0x2CD3, 'V'),
(0x2CD4, 'M', 'ⳕ'),
(0x2CD5, 'V'),
(0x2CD6, 'M', 'ⳗ'),
(0x2CD7, 'V'),
(0x2CD8, 'M', 'ⳙ'),
(0x2CD9, 'V'),
(0x2CDA, 'M', 'ⳛ'),
(0x2CDB, 'V'),
(0x2CDC, 'M', 'ⳝ'),
(0x2CDD, 'V'),
(0x2CDE, 'M', 'ⳟ'),
(0x2CDF, 'V'),
(0x2CE0, 'M', 'ⳡ'),
(0x2CE1, 'V'),
(0x2CE2, 'M', 'ⳣ'),
(0x2CE3, 'V'),
(0x2CEB, 'M', 'ⳬ'),
(0x2CEC, 'V'),
(0x2CED, 'M', 'ⳮ'),
(0x2CEE, 'V'),
(0x2CF2, 'M', 'ⳳ'),
(0x2CF3, 'V'),
(0x2CF4, 'X'),
(0x2CF9, 'V'),
(0x2D26, 'X'),
(0x2D27, 'V'),
(0x2D28, 'X'),
(0x2D2D, 'V'),
(0x2D2E, 'X'),
(0x2D30, 'V'),
(0x2D68, 'X'),
(0x2D6F, 'M', 'ⵡ'),
(0x2D70, 'V'),
(0x2D71, 'X'),
(0x2D7F, 'V'),
(0x2D97, 'X'),
(0x2DA0, 'V'),
(0x2DA7, 'X'),
(0x2DA8, 'V'),
(0x2DAF, 'X'),
(0x2DB0, 'V'),
(0x2DB7, 'X'),
(0x2DB8, 'V'),
(0x2DBF, 'X'),
(0x2DC0, 'V'),
(0x2DC7, 'X'),
(0x2DC8, 'V'),
(0x2DCF, 'X'),
(0x2DD0, 'V'),
(0x2DD7, 'X'),
(0x2DD8, 'V'),
(0x2DDF, 'X'),
(0x2DE0, 'V'),
(0x2E5E, 'X'),
(0x2E80, 'V'),
(0x2E9A, 'X'),
(0x2E9B, 'V'),
(0x2E9F, 'M', '母'),
(0x2EA0, 'V'),
(0x2EF3, 'M', '龟'),
(0x2EF4, 'X'),
(0x2F00, 'M', '一'),
(0x2F01, 'M', '丨'),
(0x2F02, 'M', '丶'),
(0x2F03, 'M', '丿'),
(0x2F04, 'M', '乙'),
(0x2F05, 'M', '亅'),
(0x2F06, 'M', '二'),
(0x2F07, 'M', '亠'),
(0x2F08, 'M', '人'),
(0x2F09, 'M', '儿'),
(0x2F0A, 'M', '入'),
(0x2F0B, 'M', '八'),
(0x2F0C, 'M', '冂'),
(0x2F0D, 'M', '冖'),
(0x2F0E, 'M', '冫'),
(0x2F0F, 'M', '几'),
(0x2F10, 'M', '凵'),
(0x2F11, 'M', '刀'),
(0x2F12, 'M', '力'),
(0x2F13, 'M', '勹'),
(0x2F14, 'M', '匕'),
(0x2F15, 'M', '匚'),
]
def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2F16, 'M', '匸'),
(0x2F17, 'M', '十'),
(0x2F18, 'M', '卜'),
(0x2F19, 'M', '卩'),
(0x2F1A, 'M', '厂'),
(0x2F1B, 'M', '厶'),
(0x2F1C, 'M', '又'),
(0x2F1D, 'M', '口'),
(0x2F1E, 'M', '囗'),
(0x2F1F, 'M', '土'),
(0x2F20, 'M', '士'),
(0x2F21, 'M', '夂'),
(0x2F22, 'M', '夊'),
(0x2F23, 'M', '夕'),
(0x2F24, 'M', '大'),
(0x2F25, 'M', '女'),
(0x2F26, 'M', '子'),
(0x2F27, 'M', '宀'),
(0x2F28, 'M', '寸'),
(0x2F29, 'M', '小'),
(0x2F2A, 'M', '尢'),
(0x2F2B, 'M', '尸'),
(0x2F2C, 'M', '屮'),
(0x2F2D, 'M', '山'),
(0x2F2E, 'M', '巛'),
(0x2F2F, 'M', '工'),
(0x2F30, 'M', '己'),
(0x2F31, 'M', '巾'),
(0x2F32, 'M', '干'),
(0x2F33, 'M', '幺'),
(0x2F34, 'M', '广'),
(0x2F35, 'M', '廴'),
(0x2F36, 'M', '廾'),
(0x2F37, 'M', '弋'),
(0x2F38, 'M', '弓'),
(0x2F39, 'M', '彐'),
(0x2F3A, 'M', '彡'),
(0x2F3B, 'M', '彳'),
(0x2F3C, 'M', '心'),
(0x2F3D, 'M', '戈'),
(0x2F3E, 'M', '戶'),
(0x2F3F, 'M', '手'),
(0x2F40, 'M', '支'),
(0x2F41, 'M', '攴'),
(0x2F42, 'M', '文'),
(0x2F43, 'M', '斗'),
(0x2F44, 'M', '斤'),
(0x2F45, 'M', '方'),
(0x2F46, 'M', '无'),
(0x2F47, 'M', '日'),
(0x2F48, 'M', '曰'),
(0x2F49, 'M', '月'),
(0x2F4A, 'M', '木'),
(0x2F4B, 'M', '欠'),
(0x2F4C, 'M', '止'),
(0x2F4D, 'M', '歹'),
(0x2F4E, 'M', '殳'),
(0x2F4F, 'M', '毋'),
(0x2F50, 'M', '比'),
(0x2F51, 'M', '毛'),
(0x2F52, 'M', '氏'),
(0x2F53, 'M', '气'),
(0x2F54, 'M', '水'),
(0x2F55, 'M', '火'),
(0x2F56, 'M', '爪'),
(0x2F57, 'M', '父'),
(0x2F58, 'M', '爻'),
(0x2F59, 'M', '爿'),
(0x2F5A, 'M', '片'),
(0x2F5B, 'M', '牙'),
(0x2F5C, 'M', '牛'),
(0x2F5D, 'M', '犬'),
(0x2F5E, 'M', '玄'),
(0x2F5F, 'M', '玉'),
(0x2F60, 'M', '瓜'),
(0x2F61, 'M', '瓦'),
(0x2F62, 'M', '甘'),
(0x2F63, 'M', '生'),
(0x2F64, 'M', '用'),
(0x2F65, 'M', '田'),
(0x2F66, 'M', '疋'),
(0x2F67, 'M', '疒'),
(0x2F68, 'M', '癶'),
(0x2F69, 'M', '白'),
(0x2F6A, 'M', '皮'),
(0x2F6B, 'M', '皿'),
(0x2F6C, 'M', '目'),
(0x2F6D, 'M', '矛'),
(0x2F6E, 'M', '矢'),
(0x2F6F, 'M', '石'),
(0x2F70, 'M', '示'),
(0x2F71, 'M', '禸'),
(0x2F72, 'M', '禾'),
(0x2F73, 'M', '穴'),
(0x2F74, 'M', '立'),
(0x2F75, 'M', '竹'),
(0x2F76, 'M', '米'),
(0x2F77, 'M', '糸'),
(0x2F78, 'M', '缶'),
(0x2F79, 'M', '网'),
]
def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2F7A, 'M', '羊'),
(0x2F7B, 'M', '羽'),
(0x2F7C, 'M', '老'),
(0x2F7D, 'M', '而'),
(0x2F7E, 'M', '耒'),
(0x2F7F, 'M', '耳'),
(0x2F80, 'M', '聿'),
(0x2F81, 'M', '肉'),
(0x2F82, 'M', '臣'),
(0x2F83, 'M', '自'),
(0x2F84, 'M', '至'),
(0x2F85, 'M', '臼'),
(0x2F86, 'M', '舌'),
(0x2F87, 'M', '舛'),
(0x2F88, 'M', '舟'),
(0x2F89, 'M', '艮'),
(0x2F8A, 'M', '色'),
(0x2F8B, 'M', '艸'),
(0x2F8C, 'M', '虍'),
(0x2F8D, 'M', '虫'),
(0x2F8E, 'M', '血'),
(0x2F8F, 'M', '行'),
(0x2F90, 'M', '衣'),
(0x2F91, 'M', '襾'),
(0x2F92, 'M', '見'),
(0x2F93, 'M', '角'),
(0x2F94, 'M', '言'),
(0x2F95, 'M', '谷'),
(0x2F96, 'M', '豆'),
(0x2F97, 'M', '豕'),
(0x2F98, 'M', '豸'),
(0x2F99, 'M', '貝'),
(0x2F9A, 'M', '赤'),
(0x2F9B, 'M', '走'),
(0x2F9C, 'M', '足'),
(0x2F9D, 'M', '身'),
(0x2F9E, 'M', '車'),
(0x2F9F, 'M', '辛'),
(0x2FA0, 'M', '辰'),
(0x2FA1, 'M', '辵'),
(0x2FA2, 'M', '邑'),
(0x2FA3, 'M', '酉'),
(0x2FA4, 'M', '釆'),
(0x2FA5, 'M', '里'),
(0x2FA6, 'M', '金'),
(0x2FA7, 'M', '長'),
(0x2FA8, 'M', '門'),
(0x2FA9, 'M', '阜'),
(0x2FAA, 'M', '隶'),
(0x2FAB, 'M', '隹'),
(0x2FAC, 'M', '雨'),
(0x2FAD, 'M', '靑'),
(0x2FAE, 'M', '非'),
(0x2FAF, 'M', '面'),
(0x2FB0, 'M', '革'),
(0x2FB1, 'M', '韋'),
(0x2FB2, 'M', '韭'),
(0x2FB3, 'M', '音'),
(0x2FB4, 'M', '頁'),
(0x2FB5, 'M', '風'),
(0x2FB6, 'M', '飛'),
(0x2FB7, 'M', '食'),
(0x2FB8, 'M', '首'),
(0x2FB9, 'M', '香'),
(0x2FBA, 'M', '馬'),
(0x2FBB, 'M', '骨'),
(0x2FBC, 'M', '高'),
(0x2FBD, 'M', '髟'),
(0x2FBE, 'M', '鬥'),
(0x2FBF, 'M', '鬯'),
(0x2FC0, 'M', '鬲'),
(0x2FC1, 'M', '鬼'),
(0x2FC2, 'M', '魚'),
(0x2FC3, 'M', '鳥'),
(0x2FC4, 'M', '鹵'),
(0x2FC5, 'M', '鹿'),
(0x2FC6, 'M', '麥'),
(0x2FC7, 'M', '麻'),
(0x2FC8, 'M', '黃'),
(0x2FC9, 'M', '黍'),
(0x2FCA, 'M', '黑'),
(0x2FCB, 'M', '黹'),
(0x2FCC, 'M', '黽'),
(0x2FCD, 'M', '鼎'),
(0x2FCE, 'M', '鼓'),
(0x2FCF, 'M', '鼠'),
(0x2FD0, 'M', '鼻'),
(0x2FD1, 'M', '齊'),
(0x2FD2, 'M', '齒'),
(0x2FD3, 'M', '龍'),
(0x2FD4, 'M', '龜'),
(0x2FD5, 'M', '龠'),
(0x2FD6, 'X'),
(0x3000, '3', ' '),
(0x3001, 'V'),
(0x3002, 'M', '.'),
(0x3003, 'V'),
(0x3036, 'M', '〒'),
(0x3037, 'V'),
(0x3038, 'M', '十'),
]

def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x3039, 'M', '卄'),
(0x303A, 'M', '卅'),
(0x303B, 'V'),
(0x3040, 'X'),
(0x3041, 'V'),
(0x3097, 'X'),
(0x3099, 'V'),
(0x309B, '3', ' ゙'),
(0x309C, '3', ' ゚'),
(0x309D, 'V'),
(0x309F, 'M', 'より'),
(0x30A0, 'V'),
(0x30FF, 'M', 'コト'),
(0x3100, 'X'),
(0x3105, 'V'),
(0x3130, 'X'),
(0x3131, 'M', 'ᄀ'),
(0x3132, 'M', 'ᄁ'),
(0x3133, 'M', 'ᆪ'),
(0x3134, 'M', 'ᄂ'),
(0x3135, 'M', 'ᆬ'),
(0x3136, 'M', 'ᆭ'),
(0x3137, 'M', 'ᄃ'),
(0x3138, 'M', 'ᄄ'),
(0x3139, 'M', 'ᄅ'),
(0x313A, 'M', 'ᆰ'),
(0x313B, 'M', 'ᆱ'),
(0x313C, 'M', 'ᆲ'),
(0x313D, 'M', 'ᆳ'),
(0x313E, 'M', 'ᆴ'),
(0x313F, 'M', 'ᆵ'),
(0x3140, 'M', 'ᄚ'),
(0x3141, 'M', 'ᄆ'),
(0x3142, 'M', 'ᄇ'),
(0x3143, 'M', 'ᄈ'),
(0x3144, 'M', 'ᄡ'),
(0x3145, 'M', 'ᄉ'),
(0x3146, 'M', 'ᄊ'),
(0x3147, 'M', 'ᄋ'),
(0x3148, 'M', 'ᄌ'),
(0x3149, 'M', 'ᄍ'),
(0x314A, 'M', 'ᄎ'),
(0x314B, 'M', 'ᄏ'),
(0x314C, 'M', 'ᄐ'),
(0x314D, 'M', 'ᄑ'),
(0x314E, 'M', 'ᄒ'),
(0x314F, 'M', 'ᅡ'),
(0x3150, 'M', 'ᅢ'),
(0x3151, 'M', 'ᅣ'),
(0x3152, 'M', 'ᅤ'),
(0x3153, 'M', 'ᅥ'),
(0x3154, 'M', 'ᅦ'),
(0x3155, 'M', 'ᅧ'),
(0x3156, 'M', 'ᅨ'),
(0x3157, 'M', 'ᅩ'),
(0x3158, 'M', 'ᅪ'),
(0x3159, 'M', 'ᅫ'),
(0x315A, 'M', 'ᅬ'),
(0x315B, 'M', 'ᅭ'),
(0x315C, 'M', 'ᅮ'),
(0x315D, 'M', 'ᅯ'),
(0x315E, 'M', 'ᅰ'),
(0x315F, 'M', 'ᅱ'),
(0x3160, 'M', 'ᅲ'),
(0x3161, 'M', 'ᅳ'),
(0x3162, 'M', 'ᅴ'),
(0x3163, 'M', 'ᅵ'),
(0x3164, 'X'),
(0x3165, 'M', 'ᄔ'),
(0x3166, 'M', 'ᄕ'),
(0x3167, 'M', 'ᇇ'),
(0x3168, 'M', 'ᇈ'),
(0x3169, 'M', 'ᇌ'),
(0x316A, 'M', 'ᇎ'),
(0x316B, 'M', 'ᇓ'),
(0x316C, 'M', 'ᇗ'),
(0x316D, 'M', 'ᇙ'),
(0x316E, 'M', 'ᄜ'),
(0x316F, 'M', 'ᇝ'),
(0x3170, 'M', 'ᇟ'),
(0x3171, 'M', 'ᄝ'),
(0x3172, 'M', 'ᄞ'),
(0x3173, 'M', 'ᄠ'),
(0x3174, 'M', 'ᄢ'),
(0x3175, 'M', 'ᄣ'),
(0x3176, 'M', 'ᄧ'),
(0x3177, 'M', 'ᄩ'),
(0x3178, 'M', 'ᄫ'),
(0x3179, 'M', 'ᄬ'),
(0x317A, 'M', 'ᄭ'),
(0x317B, 'M', 'ᄮ'),
(0x317C, 'M', 'ᄯ'),
(0x317D, 'M', 'ᄲ'),
(0x317E, 'M', 'ᄶ'),
(0x317F, 'M', 'ᅀ'),
(0x3180, 'M', 'ᅇ'),
(0x3181, 'M', 'ᅌ'),
(0x3182, 'M', 'ᇱ'),
(0x3183, 'M', 'ᇲ'),
(0x3184, 'M', 'ᅗ'),
]

def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x3185, 'M', 'ᅘ'),
(0x3186, 'M', 'ᅙ'),
(0x3187, 'M', 'ᆄ'),
(0x3188, 'M', 'ᆅ'),
(0x3189, 'M', 'ᆈ'),
(0x318A, 'M', 'ᆑ'),
(0x318B, 'M', 'ᆒ'),
(0x318C, 'M', 'ᆔ'),
(0x318D, 'M', 'ᆞ'),
(0x318E, 'M', 'ᆡ'),
(0x318F, 'X'),
(0x3190, 'V'),
(0x3192, 'M', '一'),
(0x3193, 'M', '二'),
(0x3194, 'M', '三'),
(0x3195, 'M', '四'),
(0x3196, 'M', '上'),
(0x3197, 'M', '中'),
(0x3198, 'M', '下'),
(0x3199, 'M', '甲'),
(0x319A, 'M', '乙'),
(0x319B, 'M', '丙'),
(0x319C, 'M', '丁'),
(0x319D, 'M', '天'),
(0x319E, 'M', '地'),
(0x319F, 'M', '人'),
(0x31A0, 'V'),
(0x31E4, 'X'),
(0x31F0, 'V'),
(0x3200, '3', '(ᄀ)'),
(0x3201, '3', '(ᄂ)'),
(0x3202, '3', '(ᄃ)'),
(0x3203, '3', '(ᄅ)'),
(0x3204, '3', '(ᄆ)'),
(0x3205, '3', '(ᄇ)'),
(0x3206, '3', '(ᄉ)'),
(0x3207, '3', '(ᄋ)'),
(0x3208, '3', '(ᄌ)'),
(0x3209, '3', '(ᄎ)'),
(0x320A, '3', '(ᄏ)'),
(0x320B, '3', '(ᄐ)'),
(0x320C, '3', '(ᄑ)'),
(0x320D, '3', '(ᄒ)'),
(0x320E, '3', '(가)'),
(0x320F, '3', '(나)'),
(0x3210, '3', '(다)'),
(0x3211, '3', '(라)'),
(0x3212, '3', '(마)'),
(0x3213, '3', '(바)'),
(0x3214, '3', '(사)'),
(0x3215, '3', '(아)'),
(0x3216, '3', '(자)'),
(0x3217, '3', '(차)'),
(0x3218, '3', '(카)'),
(0x3219, '3', '(타)'),
(0x321A, '3', '(파)'),
(0x321B, '3', '(하)'),
(0x321C, '3', '(주)'),
(0x321D, '3', '(오전)'),
(0x321E, '3', '(오후)'),
(0x321F, 'X'),
(0x3220, '3', '(一)'),
(0x3221, '3', '(二)'),
(0x3222, '3', '(三)'),
(0x3223, '3', '(四)'),
(0x3224, '3', '(五)'),
(0x3225, '3', '(六)'),
(0x3226, '3', '(七)'),
(0x3227, '3', '(八)'),
(0x3228, '3', '(九)'),
(0x3229, '3', '(十)'),
(0x322A, '3', '(月)'),
(0x322B, '3', '(火)'),
(0x322C, '3', '(水)'),
(0x322D, '3', '(木)'),
(0x322E, '3', '(金)'),
(0x322F, '3', '(土)'),
(0x3230, '3', '(日)'),
(0x3231, '3', '(株)'),
(0x3232, '3', '(有)'),
(0x3233, '3', '(社)'),
(0x3234, '3', '(名)'),
(0x3235, '3', '(特)'),
(0x3236, '3', '(財)'),
(0x3237, '3', '(祝)'),
(0x3238, '3', '(労)'),
(0x3239, '3', '(代)'),
(0x323A, '3', '(呼)'),
(0x323B, '3', '(学)'),
(0x323C, '3', '(監)'),
(0x323D, '3', '(企)'),
(0x323E, '3', '(資)'),
(0x323F, '3', '(協)'),
(0x3240, '3', '(祭)'),
(0x3241, '3', '(休)'),
(0x3242, '3', '(自)'),
(0x3243, '3', '(至)'),
(0x3244, 'M', '問'),
(0x3245, 'M', '幼'),
(0x3246, 'M', '文'),
]

def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x3247, 'M', '箏'),
(0x3248, 'V'),
(0x3250, 'M', 'pte'),
(0x3251, 'M', '21'),
(0x3252, 'M', '22'),
(0x3253, 'M', '23'),
(0x3254, 'M', '24'),
(0x3255, 'M', '25'),
(0x3256, 'M', '26'),
(0x3257, 'M', '27'),
(0x3258, 'M', '28'),
(0x3259, 'M', '29'),
(0x325A, 'M', '30'),
(0x325B, 'M', '31'),
(0x325C, 'M', '32'),
(0x325D, 'M', '33'),
(0x325E, 'M', '34'),
(0x325F, 'M', '35'),
(0x3260, 'M', 'ᄀ'),
(0x3261, 'M', 'ᄂ'),
(0x3262, 'M', 'ᄃ'),
(0x3263, 'M', 'ᄅ'),
(0x3264, 'M', 'ᄆ'),
(0x3265, 'M', 'ᄇ'),
(0x3266, 'M', 'ᄉ'),
(0x3267, 'M', 'ᄋ'),
(0x3268, 'M', 'ᄌ'),
(0x3269, 'M', 'ᄎ'),
(0x326A, 'M', 'ᄏ'),
(0x326B, 'M', 'ᄐ'),
(0x326C, 'M', 'ᄑ'),
(0x326D, 'M', 'ᄒ'),
(0x326E, 'M', '가'),
(0x326F, 'M', '나'),
(0x3270, 'M', '다'),
(0x3271, 'M', '라'),
(0x3272, 'M', '마'),
(0x3273, 'M', '바'),
(0x3274, 'M', '사'),
(0x3275, 'M', '아'),
(0x3276, 'M', '자'),
(0x3277, 'M', '차'),
(0x3278, 'M', '카'),
(0x3279, 'M', '타'),
(0x327A, 'M', '파'),
(0x327B, 'M', '하'),
(0x327C, 'M', '참고'),
(0x327D, 'M', '주의'),
(0x327E, 'M', '우'),
(0x327F, 'V'),
(0x3280, 'M', '一'),
(0x3281, 'M', '二'),
(0x3282, 'M', '三'),
(0x3283, 'M', '四'),
(0x3284, 'M', '五'),
(0x3285, 'M', '六'),
(0x3286, 'M', '七'),
(0x3287, 'M', '八'),
(0x3288, 'M', '九'),
(0x3289, 'M', '十'),
(0x328A, 'M', '月'),
(0x328B, 'M', '火'),
(0x328C, 'M', '水'),
(0x328D, 'M', '木'),
(0x328E, 'M', '金'),
(0x328F, 'M', '土'),
(0x3290, 'M', '日'),
(0x3291, 'M', '株'),
(0x3292, 'M', '有'),
(0x3293, 'M', '社'),
(0x3294, 'M', '名'),
(0x3295, 'M', '特'),
(0x3296, 'M', '財'),
(0x3297, 'M', '祝'),
(0x3298, 'M', '労'),
(0x3299, 'M', '秘'),
(0x329A, 'M', '男'),
(0x329B, 'M', '女'),
(0x329C, 'M', '適'),
(0x329D, 'M', '優'),
(0x329E, 'M', '印'),
(0x329F, 'M', '注'),
(0x32A0, 'M', '項'),
(0x32A1, 'M', '休'),
(0x32A2, 'M', '写'),
(0x32A3, 'M', '正'),
(0x32A4, 'M', '上'),
(0x32A5, 'M', '中'),
(0x32A6, 'M', '下'),
(0x32A7, 'M', '左'),
(0x32A8, 'M', '右'),
(0x32A9, 'M', '医'),
(0x32AA, 'M', '宗'),
(0x32AB, 'M', '学'),
(0x32AC, 'M', '監'),
(0x32AD, 'M', '企'),
(0x32AE, 'M', '資'),
(0x32AF, 'M', '協'),
(0x32B0, 'M', '夜'),
(0x32B1, 'M', '36'),
]

def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x32B2, 'M', '37'),
(0x32B3, 'M', '38'),
(0x32B4, 'M', '39'),
(0x32B5, 'M', '40'),
(0x32B6, 'M', '41'),
(0x32B7, 'M', '42'),
(0x32B8, 'M', '43'),
(0x32B9, 'M', '44'),
(0x32BA, 'M', '45'),
(0x32BB, 'M', '46'),
(0x32BC, 'M', '47'),
(0x32BD, 'M', '48'),
(0x32BE, 'M', '49'),
(0x32BF, 'M', '50'),
(0x32C0, 'M', '1月'),
(0x32C1, 'M', '2月'),
(0x32C2, 'M', '3月'),
(0x32C3, 'M', '4月'),
(0x32C4, 'M', '5月'),
(0x32C5, 'M', '6月'),
(0x32C6, 'M', '7月'),
(0x32C7, 'M', '8月'),
(0x32C8, 'M', '9月'),
(0x32C9, 'M', '10月'),
(0x32CA, 'M', '11月'),
(0x32CB, 'M', '12月'),
(0x32CC, 'M', 'hg'),
(0x32CD, 'M', 'erg'),
(0x32CE, 'M', 'ev'),
(0x32CF, 'M', 'ltd'),
(0x32D0, 'M', 'ア'),
(0x32D1, 'M', 'イ'),
(0x32D2, 'M', 'ウ'),
(0x32D3, 'M', 'エ'),
(0x32D4, 'M', 'オ'),
(0x32D5, 'M', 'カ'),
(0x32D6, 'M', 'キ'),
(0x32D7, 'M', 'ク'),
(0x32D8, 'M', 'ケ'),
(0x32D9, 'M', 'コ'),
(0x32DA, 'M', 'サ'),
(0x32DB, 'M', 'シ'),
(0x32DC, 'M', 'ス'),
(0x32DD, 'M', 'セ'),
(0x32DE, 'M', 'ソ'),
(0x32DF, 'M', 'タ'),
(0x32E0, 'M', 'チ'),
(0x32E1, 'M', 'ツ'),
(0x32E2, 'M', 'テ'),
(0x32E3, 'M', 'ト'),
(0x32E4, 'M', 'ナ'),
(0x32E5, 'M', 'ニ'),
(0x32E6, 'M', 'ヌ'),
(0x32E7, 'M', 'ネ'),
(0x32E8, 'M', 'ノ'),
(0x32E9, 'M', 'ハ'),
(0x32EA, 'M', 'ヒ'),
(0x32EB, 'M', 'フ'),
(0x32EC, 'M', 'ヘ'),
(0x32ED, 'M', 'ホ'),
(0x32EE, 'M', 'マ'),
(0x32EF, 'M', 'ミ'),
(0x32F0, 'M', 'ム'),
(0x32F1, 'M', 'メ'),
(0x32F2, 'M', 'モ'),
(0x32F3, 'M', 'ヤ'),
(0x32F4, 'M', 'ユ'),
(0x32F5, 'M', 'ヨ'),
(0x32F6, 'M', 'ラ'),
(0x32F7, 'M', 'リ'),
(0x32F8, 'M', 'ル'),
(0x32F9, 'M', 'レ'),
(0x32FA, 'M', 'ロ'),
(0x32FB, 'M', 'ワ'),
(0x32FC, 'M', 'ヰ'),
(0x32FD, 'M', 'ヱ'),
(0x32FE, 'M', 'ヲ'),
(0x32FF, 'M', '令和'),
(0x3300, 'M', 'アパート'),
(0x3301, 'M', 'アルファ'),
(0x3302, 'M', 'アンペア'),
(0x3303, 'M', 'アール'),
(0x3304, 'M', 'イニング'),
(0x3305, 'M', 'インチ'),
(0x3306, 'M', 'ウォン'),
(0x3307, 'M', 'エスクード'),
(0x3308, 'M', 'エーカー'),
(0x3309, 'M', 'オンス'),
(0x330A, 'M', 'オーム'),
(0x330B, 'M', 'カイリ'),
(0x330C, 'M', 'カラット'),
(0x330D, 'M', 'カロリー'),
(0x330E, 'M', 'ガロン'),
(0x330F, 'M', 'ガンマ'),
(0x3310, 'M', 'ギガ'),
(0x3311, 'M', 'ギニー'),
(0x3312, 'M', 'キュリー'),
(0x3313, 'M', 'ギルダー'),
(0x3314, 'M', 'キロ'),
(0x3315, 'M', 'キログラム'),
]

def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x3316, 'M', 'キロメートル'),
(0x3317, 'M', 'キロワット'),
(0x3318, 'M', 'グラム'),
(0x3319, 'M', 'グラムトン'),
(0x331A, 'M', 'クルゼイロ'),
(0x331B, 'M', 'クローネ'),
(0x331C, 'M', 'ケース'),
(0x331D, 'M', 'コルナ'),
(0x331E, 'M', 'コーポ'),
(0x331F, 'M', 'サイクル'),
(0x3320, 'M', 'サンチーム'),
(0x3321, 'M', 'シリング'),
(0x3322, 'M', 'センチ'),
(0x3323, 'M', 'セント'),
(0x3324, 'M', 'ダース'),
(0x3325, 'M', 'デシ'),
(0x3326, 'M', 'ドル'),
(0x3327, 'M', 'トン'),
(0x3328, 'M', 'ナノ'),
(0x3329, 'M', 'ノット'),
(0x332A, 'M', 'ハイツ'),
(0x332B, 'M', 'パーセント'),
(0x332C, 'M', 'パーツ'),
(0x332D, 'M', 'バーレル'),
(0x332E, 'M', 'ピアストル'),
(0x332F, 'M', 'ピクル'),
(0x3330, 'M', 'ピコ'),
(0x3331, 'M', 'ビル'),
(0x3332, 'M', 'ファラッド'),
(0x3333, 'M', 'フィート'),
(0x3334, 'M', 'ブッシェル'),
(0x3335, 'M', 'フラン'),
(0x3336, 'M', 'ヘクタール'),
(0x3337, 'M', 'ペソ'),
(0x3338, 'M', 'ペニヒ'),
(0x3339, 'M', 'ヘルツ'),
(0x333A, 'M', 'ペンス'),
(0x333B, 'M', 'ページ'),
(0x333C, 'M', 'ベータ'),
(0x333D, 'M', 'ポイント'),
(0x333E, 'M', 'ボルト'),
(0x333F, 'M', 'ホン'),
(0x3340, 'M', 'ポンド'),
(0x3341, 'M', 'ホール'),
(0x3342, 'M', 'ホーン'),
(0x3343, 'M', 'マイクロ'),
(0x3344, 'M', 'マイル'),
(0x3345, 'M', 'マッハ'),
(0x3346, 'M', 'マルク'),
(0x3347, 'M', 'マンション'),
(0x3348, 'M', 'ミクロン'),
(0x3349, 'M', 'ミリ'),
(0x334A, 'M', 'ミリバール'),
(0x334B, 'M', 'メガ'),
(0x334C, 'M', 'メガトン'),
(0x334D, 'M', 'メートル'),
(0x334E, 'M', 'ヤード'),
(0x334F, 'M', 'ヤール'),
(0x3350, 'M', 'ユアン'),
(0x3351, 'M', 'リットル'),
(0x3352, 'M', 'リラ'),
(0x3353, 'M', 'ルピー'),
(0x3354, 'M', 'ルーブル'),
(0x3355, 'M', 'レム'),
(0x3356, 'M', 'レントゲン'),
(0x3357, 'M', 'ワット'),
(0x3358, 'M', '0点'),
(0x3359, 'M', '1点'),
(0x335A, 'M', '2点'),
(0x335B, 'M', '3点'),
(0x335C, 'M', '4点'),
(0x335D, 'M', '5点'),
(0x335E, 'M', '6点'),
(0x335F, 'M', '7点'),
(0x3360, 'M', '8点'),
(0x3361, 'M', '9点'),
(0x3362, 'M', '10点'),
(0x3363, 'M', '11点'),
(0x3364, 'M', '12点'),
(0x3365, 'M', '13点'),
(0x3366, 'M', '14点'),
(0x3367, 'M', '15点'),
(0x3368, 'M', '16点'),
(0x3369, 'M', '17点'),
(0x336A, 'M', '18点'),
(0x336B, 'M', '19点'),
(0x336C, 'M', '20点'),
(0x336D, 'M', '21点'),
(0x336E, 'M', '22点'),
(0x336F, 'M', '23点'),
(0x3370, 'M', '24点'),
(0x3371, 'M', 'hpa'),
(0x3372, 'M', 'da'),
(0x3373, 'M', 'au'),
(0x3374, 'M', 'bar'),
(0x3375, 'M', 'ov'),
(0x3376, 'M', 'pc'),
(0x3377, 'M', 'dm'),
(0x3378, 'M', 'dm2'),
(0x3379, 'M', 'dm3'),
]

def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x337A, 'M', 'iu'),
(0x337B, 'M', '平成'),
(0x337C, 'M', '昭和'),
(0x337D, 'M', '大正'),
(0x337E, 'M', '明治'),
(0x337F, 'M', '株式会社'),
(0x3380, 'M', 'pa'),
(0x3381, 'M', 'na'),
(0x3382, 'M', 'μa'),
(0x3383, 'M', 'ma'),
(0x3384, 'M', 'ka'),
(0x3385, 'M', 'kb'),
(0x3386, 'M', 'mb'),
(0x3387, 'M', 'gb'),
(0x3388, 'M', 'cal'),
(0x3389, 'M', 'kcal'),
(0x338A, 'M', 'pf'),
(0x338B, 'M', 'nf'),
(0x338C, 'M', 'μf'),
(0x338D, 'M', 'μg'),
(0x338E, 'M', 'mg'),
(0x338F, 'M', 'kg'),
(0x3390, 'M', 'hz'),
(0x3391, 'M', 'khz'),
(0x3392, 'M', 'mhz'),
(0x3393, 'M', 'ghz'),
(0x3394, 'M', 'thz'),
(0x3395, 'M', 'μl'),
(0x3396, 'M', 'ml'),
(0x3397, 'M', 'dl'),
(0x3398, 'M', 'kl'),
(0x3399, 'M', 'fm'),
(0x339A, 'M', 'nm'),
(0x339B, 'M', 'μm'),
(0x339C, 'M', 'mm'),
(0x339D, 'M', 'cm'),
(0x339E, 'M', 'km'),
(0x339F, 'M', 'mm2'),
(0x33A0, 'M', 'cm2'),
(0x33A1, 'M', 'm2'),
(0x33A2, 'M', 'km2'),
(0x33A3, 'M', 'mm3'),
(0x33A4, 'M', 'cm3'),
(0x33A5, 'M', 'm3'),
(0x33A6, 'M', 'km3'),
(0x33A7, 'M', 'm∕s'),
(0x33A8, 'M', 'm∕s2'),
(0x33A9, 'M', 'pa'),
(0x33AA, 'M', 'kpa'),
(0x33AB, 'M', 'mpa'),
(0x33AC, 'M', 'gpa'),
(0x33AD, 'M', 'rad'),
(0x33AE, 'M', 'rad∕s'),
(0x33AF, 'M', 'rad∕s2'),
(0x33B0, 'M', 'ps'),
(0x33B1, 'M', 'ns'),
(0x33B2, 'M', 'μs'),
(0x33B3, 'M', 'ms'),
(0x33B4, 'M', 'pv'),
(0x33B5, 'M', 'nv'),
(0x33B6, 'M', 'μv'),
(0x33B7, 'M', 'mv'),
(0x33B8, 'M', 'kv'),
(0x33B9, 'M', 'mv'),
(0x33BA, 'M', 'pw'),
(0x33BB, 'M', 'nw'),
(0x33BC, 'M', 'μw'),
(0x33BD, 'M', 'mw'),
(0x33BE, 'M', 'kw'),
(0x33BF, 'M', 'mw'),
(0x33C0, 'M', 'kω'),
(0x33C1, 'M', 'mω'),
(0x33C2, 'X'),
(0x33C3, 'M', 'bq'),
(0x33C4, 'M', 'cc'),
(0x33C5, 'M', 'cd'),
(0x33C6, 'M', 'c∕kg'),
(0x33C7, 'X'),
(0x33C8, 'M', 'db'),
(0x33C9, 'M', 'gy'),
(0x33CA, 'M', 'ha'),
(0x33CB, 'M', 'hp'),
(0x33CC, 'M', 'in'),
(0x33CD, 'M', 'kk'),
(0x33CE, 'M', 'km'),
(0x33CF, 'M', 'kt'),
(0x33D0, 'M', 'lm'),
(0x33D1, 'M', 'ln'),
(0x33D2, 'M', 'log'),
(0x33D3, 'M', 'lx'),
(0x33D4, 'M', 'mb'),
(0x33D5, 'M', 'mil'),
(0x33D6, 'M', 'mol'),
(0x33D7, 'M', 'ph'),
(0x33D8, 'X'),
(0x33D9, 'M', 'ppm'),
(0x33DA, 'M', 'pr'),
(0x33DB, 'M', 'sr'),
(0x33DC, 'M', 'sv'),
(0x33DD, 'M', 'wb'),
]

def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x33DE, 'M', 'v∕m'),
(0x33DF, 'M', 'a∕m'),
(0x33E0, 'M', '1日'),
(0x33E1, 'M', '2日'),
(0x33E2, 'M', '3日'),
(0x33E3, 'M', '4日'),
(0x33E4, 'M', '5日'),
(0x33E5, 'M', '6日'),
(0x33E6, 'M', '7日'),
(0x33E7, 'M', '8日'),
(0x33E8, 'M', '9日'),
(0x33E9, 'M', '10日'),
(0x33EA, 'M', '11日'),
(0x33EB, 'M', '12日'),
(0x33EC, 'M', '13日'),
(0x33ED, 'M', '14日'),
(0x33EE, 'M', '15日'),
(0x33EF, 'M', '16日'),
(0x33F0, 'M', '17日'),
(0x33F1, 'M', '18日'),
(0x33F2, 'M', '19日'),
(0x33F3, 'M', '20日'),
(0x33F4, 'M', '21日'),
(0x33F5, 'M', '22日'),
(0x33F6, 'M', '23日'),
(0x33F7, 'M', '24日'),
(0x33F8, 'M', '25日'),
(0x33F9, 'M', '26日'),
(0x33FA, 'M', '27日'),
(0x33FB, 'M', '28日'),
(0x33FC, 'M', '29日'),
(0x33FD, 'M', '30日'),
(0x33FE, 'M', '31日'),
(0x33FF, 'M', 'gal'),
(0x3400, 'V'),
(0xA48D, 'X'),
(0xA490, 'V'),
(0xA4C7, 'X'),
(0xA4D0, 'V'),
(0xA62C, 'X'),
(0xA640, 'M', 'ꙁ'),
(0xA641, 'V'),
(0xA642, 'M', 'ꙃ'),
(0xA643, 'V'),
(0xA644, 'M', 'ꙅ'),
(0xA645, 'V'),
(0xA646, 'M', 'ꙇ'),
(0xA647, 'V'),
(0xA648, 'M', 'ꙉ'),
(0xA649, 'V'),
(0xA64A, 'M', 'ꙋ'),
(0xA64B, 'V'),
(0xA64C, 'M', 'ꙍ'),
(0xA64D, 'V'),
(0xA64E, 'M', 'ꙏ'),
(0xA64F, 'V'),
(0xA650, 'M', 'ꙑ'),
(0xA651, 'V'),
(0xA652, 'M', 'ꙓ'),
(0xA653, 'V'),
(0xA654, 'M', 'ꙕ'),
(0xA655, 'V'),
(0xA656, 'M', 'ꙗ'),
(0xA657, 'V'),
(0xA658, 'M', 'ꙙ'),
(0xA659, 'V'),
(0xA65A, 'M', 'ꙛ'),
(0xA65B, 'V'),
(0xA65C, 'M', 'ꙝ'),
(0xA65D, 'V'),
(0xA65E, 'M', 'ꙟ'),
(0xA65F, 'V'),
(0xA660, 'M', 'ꙡ'),
(0xA661, 'V'),
(0xA662, 'M', 'ꙣ'),
(0xA663, 'V'),
(0xA664, 'M', 'ꙥ'),
(0xA665, 'V'),
(0xA666, 'M', 'ꙧ'),
(0xA667, 'V'),
(0xA668, 'M', 'ꙩ'),
(0xA669, 'V'),
(0xA66A, 'M', 'ꙫ'),
(0xA66B, 'V'),
(0xA66C, 'M', 'ꙭ'),
(0xA66D, 'V'),
(0xA680, 'M', 'ꚁ'),
(0xA681, 'V'),
(0xA682, 'M', 'ꚃ'),
(0xA683, 'V'),
(0xA684, 'M', 'ꚅ'),
(0xA685, 'V'),
(0xA686, 'M', 'ꚇ'),
(0xA687, 'V'),
(0xA688, 'M', 'ꚉ'),
(0xA689, 'V'),
(0xA68A, 'M', 'ꚋ'),
(0xA68B, 'V'),
(0xA68C, 'M', 'ꚍ'),
(0xA68D, 'V'),
]

def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xA68E, 'M', 'ꚏ'),
(0xA68F, 'V'),
(0xA690, 'M', 'ꚑ'),
(0xA691, 'V'),
(0xA692, 'M', 'ꚓ'),
(0xA693, 'V'),
(0xA694, 'M', 'ꚕ'),
(0xA695, 'V'),
(0xA696, 'M', 'ꚗ'),
(0xA697, 'V'),
(0xA698, 'M', 'ꚙ'),
(0xA699, 'V'),
(0xA69A, 'M', 'ꚛ'),
(0xA69B, 'V'),
(0xA69C, 'M', 'ъ'),
(0xA69D, 'M', 'ь'),
(0xA69E, 'V'),
(0xA6F8, 'X'),
(0xA700, 'V'),
(0xA722, 'M', 'ꜣ'),
(0xA723, 'V'),
(0xA724, 'M', 'ꜥ'),
(0xA725, 'V'),
(0xA726, 'M', 'ꜧ'),
(0xA727, 'V'),
(0xA728, 'M', 'ꜩ'),
(0xA729, 'V'),
(0xA72A, 'M', 'ꜫ'),
(0xA72B, 'V'),
(0xA72C, 'M', 'ꜭ'),
(0xA72D, 'V'),
(0xA72E, 'M', 'ꜯ'),
(0xA72F, 'V'),
(0xA732, 'M', 'ꜳ'),
(0xA733, 'V'),
(0xA734, 'M', 'ꜵ'),
(0xA735, 'V'),
(0xA736, 'M', 'ꜷ'),
(0xA737, 'V'),
(0xA738, 'M', 'ꜹ'),
(0xA739, 'V'),
(0xA73A, 'M', 'ꜻ'),
(0xA73B, 'V'),
(0xA73C, 'M', 'ꜽ'),
(0xA73D, 'V'),
(0xA73E, 'M', 'ꜿ'),
(0xA73F, 'V'),
(0xA740, 'M', 'ꝁ'),
(0xA741, 'V'),
(0xA742, 'M', 'ꝃ'),
(0xA743, 'V'),
(0xA744, 'M', 'ꝅ'),
(0xA745, 'V'),
(0xA746, 'M', 'ꝇ'),
(0xA747, 'V'),
(0xA748, 'M', 'ꝉ'),
(0xA749, 'V'),
(0xA74A, 'M', 'ꝋ'),
(0xA74B, 'V'),
(0xA74C, 'M', 'ꝍ'),
(0xA74D, 'V'),
(0xA74E, 'M', 'ꝏ'),
(0xA74F, 'V'),
(0xA750, 'M', 'ꝑ'),
(0xA751, 'V'),
(0xA752, 'M', 'ꝓ'),
(0xA753, 'V'),
(0xA754, 'M', 'ꝕ'),
(0xA755, 'V'),
(0xA756, 'M', 'ꝗ'),
(0xA757, 'V'),
(0xA758, 'M', 'ꝙ'),
(0xA759, 'V'),
(0xA75A, 'M', 'ꝛ'),
(0xA75B, 'V'),
(0xA75C, 'M', 'ꝝ'),
(0xA75D, 'V'),
(0xA75E, 'M', 'ꝟ'),
(0xA75F, 'V'),
(0xA760, 'M', 'ꝡ'),
(0xA761, 'V'),
(0xA762, 'M', 'ꝣ'),
(0xA763, 'V'),
(0xA764, 'M', 'ꝥ'),
(0xA765, 'V'),
(0xA766, 'M', 'ꝧ'),
(0xA767, 'V'),
(0xA768, 'M', 'ꝩ'),
(0xA769, 'V'),
(0xA76A, 'M', 'ꝫ'),
(0xA76B, 'V'),
(0xA76C, 'M', 'ꝭ'),
(0xA76D, 'V'),
(0xA76E, 'M', 'ꝯ'),
(0xA76F, 'V'),
(0xA770, 'M', 'ꝯ'),
(0xA771, 'V'),
(0xA779, 'M', 'ꝺ'),
(0xA77A, 'V'),
(0xA77B, 'M', 'ꝼ'),
]

def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xA77C, 'V'),
(0xA77D, 'M', 'ᵹ'),
(0xA77E, 'M', 'ꝿ'),
(0xA77F, 'V'),
(0xA780, 'M', 'ꞁ'),
(0xA781, 'V'),
(0xA782, 'M', 'ꞃ'),
(0xA783, 'V'),
(0xA784, 'M', 'ꞅ'),
(0xA785, 'V'),
(0xA786, 'M', 'ꞇ'),
(0xA787, 'V'),
(0xA78B, 'M', 'ꞌ'),
(0xA78C, 'V'),
(0xA78D, 'M', 'ɥ'),
(0xA78E, 'V'),
(0xA790, 'M', 'ꞑ'),
(0xA791, 'V'),
(0xA792, 'M', 'ꞓ'),
(0xA793, 'V'),
(0xA796, 'M', 'ꞗ'),
(0xA797, 'V'),
(0xA798, 'M', 'ꞙ'),
(0xA799, 'V'),
(0xA79A, 'M', 'ꞛ'),
(0xA79B, 'V'),
(0xA79C, 'M', 'ꞝ'),
(0xA79D, 'V'),
(0xA79E, 'M', 'ꞟ'),
(0xA79F, 'V'),
(0xA7A0, 'M', 'ꞡ'),
(0xA7A1, 'V'),
(0xA7A2, 'M', 'ꞣ'),
(0xA7A3, 'V'),
(0xA7A4, 'M', 'ꞥ'),
(0xA7A5, 'V'),
(0xA7A6, 'M', 'ꞧ'),
(0xA7A7, 'V'),
(0xA7A8, 'M', 'ꞩ'),
(0xA7A9, 'V'),
(0xA7AA, 'M', 'ɦ'),
(0xA7AB, 'M', 'ɜ'),
(0xA7AC, 'M', 'ɡ'),
(0xA7AD, 'M', 'ɬ'),
(0xA7AE, 'M', 'ɪ'),
(0xA7AF, 'V'),
(0xA7B0, 'M', 'ʞ'),
(0xA7B1, 'M', 'ʇ'),
(0xA7B2, 'M', 'ʝ'),
(0xA7B3, 'M', 'ꭓ'),
(0xA7B4, 'M', 'ꞵ'),
(0xA7B5, 'V'),
(0xA7B6, 'M', 'ꞷ'),
(0xA7B7, 'V'),
(0xA7B8, 'M', 'ꞹ'),
(0xA7B9, 'V'),
(0xA7BA, 'M', 'ꞻ'),
(0xA7BB, 'V'),
(0xA7BC, 'M', 'ꞽ'),
(0xA7BD, 'V'),
(0xA7BE, 'M', 'ꞿ'),
(0xA7BF, 'V'),
(0xA7C0, 'M', 'ꟁ'),
(0xA7C1, 'V'),
(0xA7C2, 'M', 'ꟃ'),
(0xA7C3, 'V'),
(0xA7C4, 'M', 'ꞔ'),
(0xA7C5, 'M', 'ʂ'),
(0xA7C6, 'M', 'ᶎ'),
(0xA7C7, 'M', 'ꟈ'),
(0xA7C8, 'V'),
(0xA7C9, 'M', 'ꟊ'),
(0xA7CA, 'V'),
(0xA7CB, 'X'),
(0xA7D0, 'M', 'ꟑ'),
(0xA7D1, 'V'),
(0xA7D2, 'X'),
(0xA7D3, 'V'),
(0xA7D4, 'X'),
(0xA7D5, 'V'),
(0xA7D6, 'M', 'ꟗ'),
(0xA7D7, 'V'),
(0xA7D8, 'M', 'ꟙ'),
(0xA7D9, 'V'),
(0xA7DA, 'X'),
(0xA7F2, 'M', 'c'),
(0xA7F3, 'M', 'f'),
(0xA7F4, 'M', 'q'),
(0xA7F5, 'M', 'ꟶ'),
(0xA7F6, 'V'),
(0xA7F8, 'M', 'ħ'),
(0xA7F9, 'M', 'œ'),
(0xA7FA, 'V'),
(0xA82D, 'X'),
(0xA830, 'V'),
(0xA83A, 'X'),
(0xA840, 'V'),
(0xA878, 'X'),
(0xA880, 'V'),
(0xA8C6, 'X'),
]

def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xA8CE, 'V'),
(0xA8DA, 'X'),
(0xA8E0, 'V'),
(0xA954, 'X'),
(0xA95F, 'V'),
(0xA97D, 'X'),
(0xA980, 'V'),
(0xA9CE, 'X'),
(0xA9CF, 'V'),
(0xA9DA, 'X'),
(0xA9DE, 'V'),
(0xA9FF, 'X'),
(0xAA00, 'V'),
(0xAA37, 'X'),
(0xAA40, 'V'),
(0xAA4E, 'X'),
(0xAA50, 'V'),
(0xAA5A, 'X'),
(0xAA5C, 'V'),
(0xAAC3, 'X'),
(0xAADB, 'V'),
(0xAAF7, 'X'),
(0xAB01, 'V'),
(0xAB07, 'X'),
(0xAB09, 'V'),
(0xAB0F, 'X'),
(0xAB11, 'V'),
(0xAB17, 'X'),
(0xAB20, 'V'),
(0xAB27, 'X'),
(0xAB28, 'V'),
(0xAB2F, 'X'),
(0xAB30, 'V'),
(0xAB5C, 'M', 'ꜧ'),
(0xAB5D, 'M', 'ꬷ'),
(0xAB5E, 'M', 'ɫ'),
(0xAB5F, 'M', 'ꭒ'),
(0xAB60, 'V'),
(0xAB69, 'M', 'ʍ'),
(0xAB6A, 'V'),
(0xAB6C, 'X'),
(0xAB70, 'M', 'Ꭰ'),
(0xAB71, 'M', 'Ꭱ'),
(0xAB72, 'M', 'Ꭲ'),
(0xAB73, 'M', 'Ꭳ'),
(0xAB74, 'M', 'Ꭴ'),
(0xAB75, 'M', 'Ꭵ'),
(0xAB76, 'M', 'Ꭶ'),
(0xAB77, 'M', 'Ꭷ'),
(0xAB78, 'M', 'Ꭸ'),
(0xAB79, 'M', 'Ꭹ'),
(0xAB7A, 'M', 'Ꭺ'),
(0xAB7B, 'M', 'Ꭻ'),
(0xAB7C, 'M', 'Ꭼ'),
(0xAB7D, 'M', 'Ꭽ'),
(0xAB7E, 'M', 'Ꭾ'),
(0xAB7F, 'M', 'Ꭿ'),
(0xAB80, 'M', 'Ꮀ'),
(0xAB81, 'M', 'Ꮁ'),
(0xAB82, 'M', 'Ꮂ'),
(0xAB83, 'M', 'Ꮃ'),
(0xAB84, 'M', 'Ꮄ'),
(0xAB85, 'M', 'Ꮅ'),
(0xAB86, 'M', 'Ꮆ'),
(0xAB87, 'M', 'Ꮇ'),
(0xAB88, 'M', 'Ꮈ'),
(0xAB89, 'M', 'Ꮉ'),
(0xAB8A, 'M', 'Ꮊ'),
(0xAB8B, 'M', 'Ꮋ'),
(0xAB8C, 'M', 'Ꮌ'),
(0xAB8D, 'M', 'Ꮍ'),
(0xAB8E, 'M', 'Ꮎ'),
(0xAB8F, 'M', 'Ꮏ'),
(0xAB90, 'M', 'Ꮐ'),
(0xAB91, 'M', 'Ꮑ'),
(0xAB92, 'M', 'Ꮒ'),
(0xAB93, 'M', 'Ꮓ'),
(0xAB94, 'M', 'Ꮔ'),
(0xAB95, 'M', 'Ꮕ'),
(0xAB96, 'M', 'Ꮖ'),
(0xAB97, 'M', 'Ꮗ'),
(0xAB98, 'M', 'Ꮘ'),
(0xAB99, 'M', 'Ꮙ'),
(0xAB9A, 'M', 'Ꮚ'),
(0xAB9B, 'M', 'Ꮛ'),
(0xAB9C, 'M', 'Ꮜ'),
(0xAB9D, 'M', 'Ꮝ'),
(0xAB9E, 'M', 'Ꮞ'),
(0xAB9F, 'M', 'Ꮟ'),
(0xABA0, 'M', 'Ꮠ'),
(0xABA1, 'M', 'Ꮡ'),
(0xABA2, 'M', 'Ꮢ'),
(0xABA3, 'M', 'Ꮣ'),
(0xABA4, 'M', 'Ꮤ'),
(0xABA5, 'M', 'Ꮥ'),
(0xABA6, 'M', 'Ꮦ'),
(0xABA7, 'M', 'Ꮧ'),
(0xABA8, 'M', 'Ꮨ'),
(0xABA9, 'M', 'Ꮩ'),
(0xABAA, 'M', 'Ꮪ'),
]

def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xABAB, 'M', 'Ꮫ'),
(0xABAC, 'M', 'Ꮬ'),
(0xABAD, 'M', 'Ꮭ'),
(0xABAE, 'M', 'Ꮮ'),
(0xABAF, 'M', 'Ꮯ'),
(0xABB0, 'M', 'Ꮰ'),
(0xABB1, 'M', 'Ꮱ'),
(0xABB2, 'M', 'Ꮲ'),
(0xABB3, 'M', 'Ꮳ'),
(0xABB4, 'M', 'Ꮴ'),
(0xABB5, 'M', 'Ꮵ'),
(0xABB6, 'M', 'Ꮶ'),
(0xABB7, 'M', 'Ꮷ'),
(0xABB8, 'M', 'Ꮸ'),
(0xABB9, 'M', 'Ꮹ'),
(0xABBA, 'M', 'Ꮺ'),
(0xABBB, 'M', 'Ꮻ'),
(0xABBC, 'M', 'Ꮼ'),
(0xABBD, 'M', 'Ꮽ'),
(0xABBE, 'M', 'Ꮾ'),
(0xABBF, 'M', 'Ꮿ'),
(0xABC0, 'V'),
(0xABEE, 'X'),
(0xABF0, 'V'),
(0xABFA, 'X'),
(0xAC00, 'V'),
(0xD7A4, 'X'),
(0xD7B0, 'V'),
(0xD7C7, 'X'),
(0xD7CB, 'V'),
(0xD7FC, 'X'),
(0xF900, 'M', '豈'),
(0xF901, 'M', '更'),
(0xF902, 'M', '車'),
(0xF903, 'M', '賈'),
(0xF904, 'M', '滑'),
(0xF905, 'M', '串'),
(0xF906, 'M', '句'),
(0xF907, 'M', '龜'),
(0xF909, 'M', '契'),
(0xF90A, 'M', '金'),
(0xF90B, 'M', '喇'),
(0xF90C, 'M', '奈'),
(0xF90D, 'M', '懶'),
(0xF90E, 'M', '癩'),
(0xF90F, 'M', '羅'),
(0xF910, 'M', '蘿'),
(0xF911, 'M', '螺'),
(0xF912, 'M', '裸'),
(0xF913, 'M', '邏'),
(0xF914, 'M', '樂'),
(0xF915, 'M', '洛'),
(0xF916, 'M', '烙'),
(0xF917, 'M', '珞'),
(0xF918, 'M', '落'),
(0xF919, 'M', '酪'),
(0xF91A, 'M', '駱'),
(0xF91B, 'M', '亂'),
(0xF91C, 'M', '卵'),
(0xF91D, 'M', '欄'),
(0xF91E, 'M', '爛'),
(0xF91F, 'M', '蘭'),
(0xF920, 'M', '鸞'),
(0xF921, 'M', '嵐'),
(0xF922, 'M', '濫'),
(0xF923, 'M', '藍'),
(0xF924, 'M', '襤'),
(0xF925, 'M', '拉'),
(0xF926, 'M', '臘'),
(0xF927, 'M', '蠟'),
(0xF928, 'M', '廊'),
(0xF929, 'M', '朗'),
(0xF92A, 'M', '浪'),
(0xF92B, 'M', '狼'),
(0xF92C, 'M', '郎'),
(0xF92D, 'M', '來'),
(0xF92E, 'M', '冷'),
(0xF92F, 'M', '勞'),
(0xF930, 'M', '擄'),
(0xF931, 'M', '櫓'),
(0xF932, 'M', '爐'),
(0xF933, 'M', '盧'),
(0xF934, 'M', '老'),
(0xF935, 'M', '蘆'),
(0xF936, 'M', '虜'),
(0xF937, 'M', '路'),
(0xF938, 'M', '露'),
(0xF939, 'M', '魯'),
(0xF93A, 'M', '鷺'),
(0xF93B, 'M', '碌'),
(0xF93C, 'M', '祿'),
(0xF93D, 'M', '綠'),
(0xF93E, 'M', '菉'),
(0xF93F, 'M', '錄'),
(0xF940, 'M', '鹿'),
(0xF941, 'M', '論'),
(0xF942, 'M', '壟'),
(0xF943, 'M', '弄'),
(0xF944, 'M', '籠'),
(0xF945, 'M', '聾'),
]

def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xF946, 'M', '牢'),
(0xF947, 'M', '磊'),
(0xF948, 'M', '賂'),
(0xF949, 'M', '雷'),
(0xF94A, 'M', '壘'),
(0xF94B, 'M', '屢'),
(0xF94C, 'M', '樓'),
(0xF94D, 'M', '淚'),
(0xF94E, 'M', '漏'),
(0xF94F, 'M', '累'),
(0xF950, 'M', '縷'),
(0xF951, 'M', '陋'),
(0xF952, 'M', '勒'),
(0xF953, 'M', '肋'),
(0xF954, 'M', '凜'),
(0xF955, 'M', '凌'),
(0xF956, 'M', '稜'),
(0xF957, 'M', '綾'),
(0xF958, 'M', '菱'),
(0xF959, 'M', '陵'),
(0xF95A, 'M', '讀'),
(0xF95B, 'M', '拏'),
(0xF95C, 'M', '樂'),
(0xF95D, 'M', '諾'),
(0xF95E, 'M', '丹'),
(0xF95F, 'M', '寧'),
(0xF960, 'M', '怒'),
(0xF961, 'M', '率'),
(0xF962, 'M', '異'),
(0xF963, 'M', '北'),
(0xF964, 'M', '磻'),
(0xF965, 'M', '便'),
(0xF966, 'M', '復'),
(0xF967, 'M', '不'),
(0xF968, 'M', '泌'),
(0xF969, 'M', '數'),
(0xF96A, 'M', '索'),
(0xF96B, 'M', '參'),
(0xF96C, 'M', '塞'),
(0xF96D, 'M', '省'),
(0xF96E, 'M', '葉'),
(0xF96F, 'M', '說'),
(0xF970, 'M', '殺'),
(0xF971, 'M', '辰'),
(0xF972, 'M', '沈'),
(0xF973, 'M', '拾'),
(0xF974, 'M', '若'),
(0xF975, 'M', '掠'),
(0xF976, 'M', '略'),
(0xF977, 'M', '亮'),
(0xF978, 'M', '兩'),
(0xF979, 'M', '凉'),
(0xF97A, 'M', '梁'),
(0xF97B, 'M', '糧'),
(0xF97C, 'M', '良'),
(0xF97D, 'M', '諒'),
(0xF97E, 'M', '量'),
(0xF97F, 'M', '勵'),
(0xF980, 'M', '呂'),
(0xF981, 'M', '女'),
(0xF982, 'M', '廬'),
(0xF983, 'M', '旅'),
(0xF984, 'M', '濾'),
(0xF985, 'M', '礪'),
(0xF986, 'M', '閭'),
(0xF987, 'M', '驪'),
(0xF988, 'M', '麗'),
(0xF989, 'M', '黎'),
(0xF98A, 'M', '力'),
(0xF98B, 'M', '曆'),
(0xF98C, 'M', '歷'),
(0xF98D, 'M', '轢'),
(0xF98E, 'M', '年'),
(0xF98F, 'M', '憐'),
(0xF990, 'M', '戀'),
(0xF991, 'M', '撚'),
(0xF992, 'M', '漣'),
(0xF993, 'M', '煉'),
(0xF994, 'M', '璉'),
(0xF995, 'M', '秊'),
(0xF996, 'M', '練'),
(0xF997, 'M', '聯'),
(0xF998, 'M', '輦'),
(0xF999, 'M', '蓮'),
(0xF99A, 'M', '連'),
(0xF99B, 'M', '鍊'),
(0xF99C, 'M', '列'),
(0xF99D, 'M', '劣'),
(0xF99E, 'M', '咽'),
(0xF99F, 'M', '烈'),
(0xF9A0, 'M', '裂'),
(0xF9A1, 'M', '說'),
(0xF9A2, 'M', '廉'),
(0xF9A3, 'M', '念'),
(0xF9A4, 'M', '捻'),
(0xF9A5, 'M', '殮'),
(0xF9A6, 'M', '簾'),
(0xF9A7, 'M', '獵'),
(0xF9A8, 'M', '令'),
(0xF9A9, 'M', '囹'),
]

def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xF9AA, 'M', '寧'),
(0xF9AB, 'M', '嶺'),
(0xF9AC, 'M', '怜'),
(0xF9AD, 'M', '玲'),
(0xF9AE, 'M', '瑩'),
(0xF9AF, 'M', '羚'),
(0xF9B0, 'M', '聆'),
(0xF9B1, 'M', '鈴'),
(0xF9B2, 'M', '零'),
(0xF9B3, 'M', '靈'),
(0xF9B4, 'M', '領'),
(0xF9B5, 'M', '例'),
(0xF9B6, 'M', '禮'),
(0xF9B7, 'M', '醴'),
(0xF9B8, 'M', '隸'),
(0xF9B9, 'M', '惡'),
(0xF9BA, 'M', '了'),
(0xF9BB, 'M', '僚'),
(0xF9BC, 'M', '寮'),
(0xF9BD, 'M', '尿'),
(0xF9BE, 'M', '料'),
(0xF9BF, 'M', '樂'),
(0xF9C0, 'M', '燎'),
(0xF9C1, 'M', '療'),
(0xF9C2, 'M', '蓼'),
(0xF9C3, 'M', '遼'),
(0xF9C4, 'M', '龍'),
(0xF9C5, 'M', '暈'),
(0xF9C6, 'M', '阮'),
(0xF9C7, 'M', '劉'),
(0xF9C8, 'M', '杻'),
(0xF9C9, 'M', '柳'),
(0xF9CA, 'M', '流'),
(0xF9CB, 'M', '溜'),
(0xF9CC, 'M', '琉'),
(0xF9CD, 'M', '留'),
(0xF9CE, 'M', '硫'),
(0xF9CF, 'M', '紐'),
(0xF9D0, 'M', '類'),
(0xF9D1, 'M', '六'),
(0xF9D2, 'M', '戮'),
(0xF9D3, 'M', '陸'),
(0xF9D4, 'M', '倫'),
(0xF9D5, 'M', '崙'),
(0xF9D6, 'M', '淪'),
(0xF9D7, 'M', '輪'),
(0xF9D8, 'M', '律'),
(0xF9D9, 'M', '慄'),
(0xF9DA, 'M', '栗'),
(0xF9DB, 'M', '率'),
(0xF9DC, 'M', '隆'),
(0xF9DD, 'M', '利'),
(0xF9DE, 'M', '吏'),
(0xF9DF, 'M', '履'),
(0xF9E0, 'M', '易'),
(0xF9E1, 'M', '李'),
(0xF9E2, 'M', '梨'),
(0xF9E3, 'M', '泥'),
(0xF9E4, 'M', '理'),
(0xF9E5, 'M', '痢'),
(0xF9E6, 'M', '罹'),
(0xF9E7, 'M', '裏'),
(0xF9E8, 'M', '裡'),
(0xF9E9, 'M', '里'),
(0xF9EA, 'M', '離'),
(0xF9EB, 'M', '匿'),
(0xF9EC, 'M', '溺'),
(0xF9ED, 'M', '吝'),
(0xF9EE, 'M', '燐'),
(0xF9EF, 'M', '璘'),
(0xF9F0, 'M', '藺'),
(0xF9F1, 'M', '隣'),
(0xF9F2, 'M', '鱗'),
(0xF9F3, 'M', '麟'),
(0xF9F4, 'M', '林'),
(0xF9F5, 'M', '淋'),
(0xF9F6, 'M', '臨'),
(0xF9F7, 'M', '立'),
(0xF9F8, 'M', '笠'),
(0xF9F9, 'M', '粒'),
(0xF9FA, 'M', '狀'),
(0xF9FB, 'M', '炙'),
(0xF9FC, 'M', '識'),
(0xF9FD, 'M', '什'),
(0xF9FE, 'M', '茶'),
(0xF9FF, 'M', '刺'),
(0xFA00, 'M', '切'),
(0xFA01, 'M', '度'),
(0xFA02, 'M', '拓'),
(0xFA03, 'M', '糖'),
(0xFA04, 'M', '宅'),
(0xFA05, 'M', '洞'),
(0xFA06, 'M', '暴'),
(0xFA07, 'M', '輻'),
(0xFA08, 'M', '行'),
(0xFA09, 'M', '降'),
(0xFA0A, 'M', '見'),
(0xFA0B, 'M', '廓'),
(0xFA0C, 'M', '兀'),
(0xFA0D, 'M', '嗀'),
]

def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFA0E, 'V'),
(0xFA10, 'M', '塚'),
(0xFA11, 'V'),
(0xFA12, 'M', '晴'),
(0xFA13, 'V'),
(0xFA15, 'M', '凞'),
(0xFA16, 'M', '猪'),
(0xFA17, 'M', '益'),
(0xFA18, 'M', '礼'),
(0xFA19, 'M', '神'),
(0xFA1A, 'M', '祥'),
(0xFA1B, 'M', '福'),
(0xFA1C, 'M', '靖'),
(0xFA1D, 'M', '精'),
(0xFA1E, 'M', '羽'),
(0xFA1F, 'V'),
(0xFA20, 'M', '蘒'),
(0xFA21, 'V'),
(0xFA22, 'M', '諸'),
(0xFA23, 'V'),
(0xFA25, 'M', '逸'),
(0xFA26, 'M', '都'),
(0xFA27, 'V'),
(0xFA2A, 'M', '飯'),
(0xFA2B, 'M', '飼'),
(0xFA2C, 'M', '館'),
(0xFA2D, 'M', '鶴'),
(0xFA2E, 'M', '郞'),
(0xFA2F, 'M', '隷'),
(0xFA30, 'M', '侮'),
(0xFA31, 'M', '僧'),
(0xFA32, 'M', '免'),
(0xFA33, 'M', '勉'),
(0xFA34, 'M', '勤'),
(0xFA35, 'M', '卑'),
(0xFA36, 'M', '喝'),
(0xFA37, 'M', '嘆'),
(0xFA38, 'M', '器'),
(0xFA39, 'M', '塀'),
(0xFA3A, 'M', '墨'),
(0xFA3B, 'M', '層'),
(0xFA3C, 'M', '屮'),
(0xFA3D, 'M', '悔'),
(0xFA3E, 'M', '慨'),
(0xFA3F, 'M', '憎'),
(0xFA40, 'M', '懲'),
(0xFA41, 'M', '敏'),
(0xFA42, 'M', '既'),
(0xFA43, 'M', '暑'),
(0xFA44, 'M', '梅'),
(0xFA45, 'M', '海'),
(0xFA46, 'M', '渚'),
(0xFA47, 'M', '漢'),
(0xFA48, 'M', '煮'),
(0xFA49, 'M', '爫'),
(0xFA4A, 'M', '琢'),
(0xFA4B, 'M', '碑'),
(0xFA4C, 'M', '社'),
(0xFA4D, 'M', '祉'),
(0xFA4E, 'M', '祈'),
(0xFA4F, 'M', '祐'),
(0xFA50, 'M', '祖'),
(0xFA51, 'M', '祝'),
(0xFA52, 'M', '禍'),
(0xFA53, 'M', '禎'),
(0xFA54, 'M', '穀'),
(0xFA55, 'M', '突'),
(0xFA56, 'M', '節'),
(0xFA57, 'M', '練'),
(0xFA58, 'M', '縉'),
(0xFA59, 'M', '繁'),
(0xFA5A, 'M', '署'),
(0xFA5B, 'M', '者'),
(0xFA5C, 'M', '臭'),
(0xFA5D, 'M', '艹'),
(0xFA5F, 'M', '著'),
(0xFA60, 'M', '褐'),
(0xFA61, 'M', '視'),
(0xFA62, 'M', '謁'),
(0xFA63, 'M', '謹'),
(0xFA64, 'M', '賓'),
(0xFA65, 'M', '贈'),
(0xFA66, 'M', '辶'),
(0xFA67, 'M', '逸'),
(0xFA68, 'M', '難'),
(0xFA69, 'M', '響'),
(0xFA6A, 'M', '頻'),
(0xFA6B, 'M', '恵'),
(0xFA6C, 'M', '𤋮'),
(0xFA6D, 'M', '舘'),
(0xFA6E, 'X'),
(0xFA70, 'M', '並'),
(0xFA71, 'M', '况'),
(0xFA72, 'M', '全'),
(0xFA73, 'M', '侀'),
(0xFA74, 'M', '充'),
(0xFA75, 'M', '冀'),
(0xFA76, 'M', '勇'),
(0xFA77, 'M', '勺'),
(0xFA78, 'M', '喝'),
]

def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFA79, 'M', '啕'),
(0xFA7A, 'M', '喙'),
(0xFA7B, 'M', '嗢'),
(0xFA7C, 'M', '塚'),
(0xFA7D, 'M', '墳'),
(0xFA7E, 'M', '奄'),
(0xFA7F, 'M', '奔'),
(0xFA80, 'M', '婢'),
(0xFA81, 'M', '嬨'),
(0xFA82, 'M', '廒'),
(0xFA83, 'M', '廙'),
(0xFA84, 'M', '彩'),
(0xFA85, 'M', '徭'),
(0xFA86, 'M', '惘'),
(0xFA87, 'M', '慎'),
(0xFA88, 'M', '愈'),
(0xFA89, 'M', '憎'),
(0xFA8A, 'M', '慠'),
(0xFA8B, 'M', '懲'),
(0xFA8C, 'M', '戴'),
(0xFA8D, 'M', '揄'),
(0xFA8E, 'M', '搜'),
(0xFA8F, 'M', '摒'),
(0xFA90, 'M', '敖'),
(0xFA91, 'M', '晴'),
(0xFA92, 'M', '朗'),
(0xFA93, 'M', '望'),
(0xFA94, 'M', '杖'),
(0xFA95, 'M', '歹'),
(0xFA96, 'M', '殺'),
(0xFA97, 'M', '流'),
(0xFA98, 'M', '滛'),
(0xFA99, 'M', '滋'),
(0xFA9A, 'M', '漢'),
(0xFA9B, 'M', '瀞'),
(0xFA9C, 'M', '煮'),
(0xFA9D, 'M', '瞧'),
(0xFA9E, 'M', '爵'),
(0xFA9F, 'M', '犯'),
(0xFAA0, 'M', '猪'),
(0xFAA1, 'M', '瑱'),
(0xFAA2, 'M', '甆'),
(0xFAA3, 'M', '画'),
(0xFAA4, 'M', '瘝'),
(0xFAA5, 'M', '瘟'),
(0xFAA6, 'M', '益'),
(0xFAA7, 'M', '盛'),
(0xFAA8, 'M', '直'),
(0xFAA9, 'M', '睊'),
(0xFAAA, 'M', '着'),
(0xFAAB, 'M', '磌'),
(0xFAAC, 'M', '窱'),
(0xFAAD, 'M', '節'),
(0xFAAE, 'M', '类'),
(0xFAAF, 'M', '絛'),
(0xFAB0, 'M', '練'),
(0xFAB1, 'M', '缾'),
(0xFAB2, 'M', '者'),
(0xFAB3, 'M', '荒'),
(0xFAB4, 'M', '華'),
(0xFAB5, 'M', '蝹'),
(0xFAB6, 'M', '襁'),
(0xFAB7, 'M', '覆'),
(0xFAB8, 'M', '視'),
(0xFAB9, 'M', '調'),
(0xFABA, 'M', '諸'),
(0xFABB, 'M', '請'),
(0xFABC, 'M', '謁'),
(0xFABD, 'M', '諾'),
(0xFABE, 'M', '諭'),
(0xFABF, 'M', '謹'),
(0xFAC0, 'M', '變'),
(0xFAC1, 'M', '贈'),
(0xFAC2, 'M', '輸'),
(0xFAC3, 'M', '遲'),
(0xFAC4, 'M', '醙'),
(0xFAC5, 'M', '鉶'),
(0xFAC6, 'M', '陼'),
(0xFAC7, 'M', '難'),
(0xFAC8, 'M', '靖'),
(0xFAC9, 'M', '韛'),
(0xFACA, 'M', '響'),
(0xFACB, 'M', '頋'),
(0xFACC, 'M', '頻'),
(0xFACD, 'M', '鬒'),
(0xFACE, 'M', '龜'),
(0xFACF, 'M', '𢡊'),
(0xFAD0, 'M', '𢡄'),
(0xFAD1, 'M', '𣏕'),
(0xFAD2, 'M', '㮝'),
(0xFAD3, 'M', '䀘'),
(0xFAD4, 'M', '䀹'),
(0xFAD5, 'M', '𥉉'),
(0xFAD6, 'M', '𥳐'),
(0xFAD7, 'M', '𧻓'),
(0xFAD8, 'M', '齃'),
(0xFAD9, 'M', '龎'),
(0xFADA, 'X'),
(0xFB00, 'M', 'ff'),
(0xFB01, 'M', 'fi'),
]

def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFB02, 'M', 'fl'),
(0xFB03, 'M', 'ffi'),
(0xFB04, 'M', 'ffl'),
(0xFB05, 'M', 'st'),
(0xFB07, 'X'),
(0xFB13, 'M', 'մն'),
(0xFB14, 'M', 'մե'),
(0xFB15, 'M', 'մի'),
(0xFB16, 'M', 'վն'),
(0xFB17, 'M', 'մխ'),
(0xFB18, 'X'),
(0xFB1D, 'M', 'יִ'),
(0xFB1E, 'V'),
(0xFB1F, 'M', 'ײַ'),
(0xFB20, 'M', 'ע'),
(0xFB21, 'M', 'א'),
(0xFB22, 'M', 'ד'),
(0xFB23, 'M', 'ה'),
(0xFB24, 'M', 'כ'),
(0xFB25, 'M', 'ל'),
(0xFB26, 'M', 'ם'),
(0xFB27, 'M', 'ר'),
(0xFB28, 'M', 'ת'),
(0xFB29, '3', '+'),
(0xFB2A, 'M', 'שׁ'),
(0xFB2B, 'M', 'שׂ'),
(0xFB2C, 'M', 'שּׁ'),
(0xFB2D, 'M', 'שּׂ'),
(0xFB2E, 'M', 'אַ'),
(0xFB2F, 'M', 'אָ'),
(0xFB30, 'M', 'אּ'),
(0xFB31, 'M', 'בּ'),
(0xFB32, 'M', 'גּ'),
(0xFB33, 'M', 'דּ'),
(0xFB34, 'M', 'הּ'),
(0xFB35, 'M', 'וּ'),
(0xFB36, 'M', 'זּ'),
(0xFB37, 'X'),
(0xFB38, 'M', 'טּ'),
(0xFB39, 'M', 'יּ'),
(0xFB3A, 'M', 'ךּ'),
(0xFB3B, 'M', 'כּ'),
(0xFB3C, 'M', 'לּ'),
(0xFB3D, 'X'),
(0xFB3E, 'M', 'מּ'),
(0xFB3F, 'X'),
(0xFB40, 'M', 'נּ'),
(0xFB41, 'M', 'סּ'),
(0xFB42, 'X'),
(0xFB43, 'M', 'ףּ'),
(0xFB44, 'M', 'פּ'),
(0xFB45, 'X'),
(0xFB46, 'M', 'צּ'),
(0xFB47, 'M', 'קּ'),
(0xFB48, 'M', 'רּ'),
(0xFB49, 'M', 'שּ'),
(0xFB4A, 'M', 'תּ'),
(0xFB4B, 'M', 'וֹ'),
(0xFB4C, 'M', 'בֿ'),
(0xFB4D, 'M', 'כֿ'),
(0xFB4E, 'M', 'פֿ'),
(0xFB4F, 'M', 'אל'),
(0xFB50, 'M', 'ٱ'),
(0xFB52, 'M', 'ٻ'),
(0xFB56, 'M', 'پ'),
(0xFB5A, 'M', 'ڀ'),
(0xFB5E, 'M', 'ٺ'),
(0xFB62, 'M', 'ٿ'),
(0xFB66, 'M', 'ٹ'),
(0xFB6A, 'M', 'ڤ'),
(0xFB6E, 'M', 'ڦ'),
(0xFB72, 'M', 'ڄ'),
(0xFB76, 'M', 'ڃ'),
(0xFB7A, 'M', 'چ'),
(0xFB7E, 'M', 'ڇ'),
(0xFB82, 'M', 'ڍ'),
(0xFB84, 'M', 'ڌ'),
(0xFB86, 'M', 'ڎ'),
(0xFB88, 'M', 'ڈ'),
(0xFB8A, 'M', 'ژ'),
(0xFB8C, 'M', 'ڑ'),
(0xFB8E, 'M', 'ک'),
(0xFB92, 'M', 'گ'),
(0xFB96, 'M', 'ڳ'),
(0xFB9A, 'M', 'ڱ'),
(0xFB9E, 'M', 'ں'),
(0xFBA0, 'M', 'ڻ'),
(0xFBA4, 'M', 'ۀ'),
(0xFBA6, 'M', 'ہ'),
(0xFBAA, 'M', 'ھ'),
(0xFBAE, 'M', 'ے'),
(0xFBB0, 'M', 'ۓ'),
(0xFBB2, 'V'),
(0xFBC3, 'X'),
(0xFBD3, 'M', 'ڭ'),
(0xFBD7, 'M', 'ۇ'),
(0xFBD9, 'M', 'ۆ'),
(0xFBDB, 'M', 'ۈ'),
(0xFBDD, 'M', 'ۇٴ'),
(0xFBDE, 'M', 'ۋ'),
]

def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFBE0, 'M', 'ۅ'),
(0xFBE2, 'M', 'ۉ'),
(0xFBE4, 'M', 'ې'),
(0xFBE8, 'M', 'ى'),
(0xFBEA, 'M', 'ئا'),
(0xFBEC, 'M', 'ئە'),
(0xFBEE, 'M', 'ئو'),
(0xFBF0, 'M', 'ئۇ'),
(0xFBF2, 'M', 'ئۆ'),
(0xFBF4, 'M', 'ئۈ'),
(0xFBF6, 'M', 'ئې'),
(0xFBF9, 'M', 'ئى'),
(0xFBFC, 'M', 'ی'),
(0xFC00, 'M', 'ئج'),
(0xFC01, 'M', 'ئح'),
(0xFC02, 'M', 'ئم'),
(0xFC03, 'M', 'ئى'),
(0xFC04, 'M', 'ئي'),
(0xFC05, 'M', 'بج'),
(0xFC06, 'M', 'بح'),
(0xFC07, 'M', 'بخ'),
(0xFC08, 'M', 'بم'),
(0xFC09, 'M', 'بى'),
(0xFC0A, 'M', 'بي'),
(0xFC0B, 'M', 'تج'),
(0xFC0C, 'M', 'تح'),
(0xFC0D, 'M', 'تخ'),
(0xFC0E, 'M', 'تم'),
(0xFC0F, 'M', 'تى'),
(0xFC10, 'M', 'تي'),
(0xFC11, 'M', 'ثج'),
(0xFC12, 'M', 'ثم'),
(0xFC13, 'M', 'ثى'),
(0xFC14, 'M', 'ثي'),
(0xFC15, 'M', 'جح'),
(0xFC16, 'M', 'جم'),
(0xFC17, 'M', 'حج'),
(0xFC18, 'M', 'حم'),
(0xFC19, 'M', 'خج'),
(0xFC1A, 'M', 'خح'),
(0xFC1B, 'M', 'خم'),
(0xFC1C, 'M', 'سج'),
(0xFC1D, 'M', 'سح'),
(0xFC1E, 'M', 'سخ'),
(0xFC1F, 'M', 'سم'),
(0xFC20, 'M', 'صح'),
(0xFC21, 'M', 'صم'),
(0xFC22, 'M', 'ضج'),
(0xFC23, 'M', 'ضح'),
(0xFC24, 'M', 'ضخ'),
(0xFC25, 'M', 'ضم'),
(0xFC26, 'M', 'طح'),
(0xFC27, 'M', 'طم'),
(0xFC28, 'M', 'ظم'),
(0xFC29, 'M', 'عج'),
(0xFC2A, 'M', 'عم'),
(0xFC2B, 'M', 'غج'),
(0xFC2C, 'M', 'غم'),
(0xFC2D, 'M', 'فج'),
(0xFC2E, 'M', 'فح'),
(0xFC2F, 'M', 'فخ'),
(0xFC30, 'M', 'فم'),
(0xFC31, 'M', 'فى'),
(0xFC32, 'M', 'في'),
(0xFC33, 'M', 'قح'),
(0xFC34, 'M', 'قم'),
(0xFC35, 'M', 'قى'),
(0xFC36, 'M', 'قي'),
(0xFC37, 'M', 'كا'),
(0xFC38, 'M', 'كج'),
(0xFC39, 'M', 'كح'),
(0xFC3A, 'M', 'كخ'),
(0xFC3B, 'M', 'كل'),
(0xFC3C, 'M', 'كم'),
(0xFC3D, 'M', 'كى'),
(0xFC3E, 'M', 'كي'),
(0xFC3F, 'M', 'لج'),
(0xFC40, 'M', 'لح'),
(0xFC41, 'M', 'لخ'),
(0xFC42, 'M', 'لم'),
(0xFC43, 'M', 'لى'),
(0xFC44, 'M', 'لي'),
(0xFC45, 'M', 'مج'),
(0xFC46, 'M', 'مح'),
(0xFC47, 'M', 'مخ'),
(0xFC48, 'M', 'مم'),
(0xFC49, 'M', 'مى'),
(0xFC4A, 'M', 'مي'),
(0xFC4B, 'M', 'نج'),
(0xFC4C, 'M', 'نح'),
(0xFC4D, 'M', 'نخ'),
(0xFC4E, 'M', 'نم'),
(0xFC4F, 'M', 'نى'),
(0xFC50, 'M', 'ني'),
(0xFC51, 'M', 'هج'),
(0xFC52, 'M', 'هم'),
(0xFC53, 'M', 'هى'),
(0xFC54, 'M', 'هي'),
(0xFC55, 'M', 'يج'),
(0xFC56, 'M', 'يح'),
]

def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFC57, 'M', 'يخ'),
(0xFC58, 'M', 'يم'),
(0xFC59, 'M', 'يى'),
(0xFC5A, 'M', 'يي'),
(0xFC5B, 'M', 'ذٰ'),
(0xFC5C, 'M', 'رٰ'),
(0xFC5D, 'M', 'ىٰ'),
(0xFC5E, '3', ' ٌّ'),
(0xFC5F, '3', ' ٍّ'),
(0xFC60, '3', ' َّ'),
(0xFC61, '3', ' ُّ'),
(0xFC62, '3', ' ِّ'),
(0xFC63, '3', ' ّٰ'),
(0xFC64, 'M', 'ئر'),
(0xFC65, 'M', 'ئز'),
(0xFC66, 'M', 'ئم'),
(0xFC67, 'M', 'ئن'),
(0xFC68, 'M', 'ئى'),
(0xFC69, 'M', 'ئي'),
(0xFC6A, 'M', 'بر'),
(0xFC6B, 'M', 'بز'),
(0xFC6C, 'M', 'بم'),
(0xFC6D, 'M', 'بن'),
(0xFC6E, 'M', 'بى'),
(0xFC6F, 'M', 'بي'),
(0xFC70, 'M', 'تر'),
(0xFC71, 'M', 'تز'),
(0xFC72, 'M', 'تم'),
(0xFC73, 'M', 'تن'),
(0xFC74, 'M', 'تى'),
(0xFC75, 'M', 'تي'),
(0xFC76, 'M', 'ثر'),
(0xFC77, 'M', 'ثز'),
(0xFC78, 'M', 'ثم'),
(0xFC79, 'M', 'ثن'),
(0xFC7A, 'M', 'ثى'),
(0xFC7B, 'M', 'ثي'),
(0xFC7C, 'M', 'فى'),
(0xFC7D, 'M', 'في'),
(0xFC7E, 'M', 'قى'),
(0xFC7F, 'M', 'قي'),
(0xFC80, 'M', 'كا'),
(0xFC81, 'M', 'كل'),
(0xFC82, 'M', 'كم'),
(0xFC83, 'M', 'كى'),
(0xFC84, 'M', 'كي'),
(0xFC85, 'M', 'لم'),
(0xFC86, 'M', 'لى'),
(0xFC87, 'M', 'لي'),
(0xFC88, 'M', 'ما'),
(0xFC89, 'M', 'مم'),
(0xFC8A, 'M', 'نر'),
(0xFC8B, 'M', 'نز'),
(0xFC8C, 'M', 'نم'),
(0xFC8D, 'M', 'نن'),
(0xFC8E, 'M', 'نى'),
(0xFC8F, 'M', 'ني'),
(0xFC90, 'M', 'ىٰ'),
(0xFC91, 'M', 'ير'),
(0xFC92, 'M', 'يز'),
(0xFC93, 'M', 'يم'),
(0xFC94, 'M', 'ين'),
(0xFC95, 'M', 'يى'),
(0xFC96, 'M', 'يي'),
(0xFC97, 'M', 'ئج'),
(0xFC98, 'M', 'ئح'),
(0xFC99, 'M', 'ئخ'),
(0xFC9A, 'M', 'ئم'),
(0xFC9B, 'M', 'ئه'),
(0xFC9C, 'M', 'بج'),
(0xFC9D, 'M', 'بح'),
(0xFC9E, 'M', 'بخ'),
(0xFC9F, 'M', 'بم'),
(0xFCA0, 'M', 'به'),
(0xFCA1, 'M', 'تج'),
(0xFCA2, 'M', 'تح'),
(0xFCA3, 'M', 'تخ'),
(0xFCA4, 'M', 'تم'),
(0xFCA5, 'M', 'ته'),
(0xFCA6, 'M', 'ثم'),
(0xFCA7, 'M', 'جح'),
(0xFCA8, 'M', 'جم'),
(0xFCA9, 'M', 'حج'),
(0xFCAA, 'M', 'حم'),
(0xFCAB, 'M', 'خج'),
(0xFCAC, 'M', 'خم'),
(0xFCAD, 'M', 'سج'),
(0xFCAE, 'M', 'سح'),
(0xFCAF, 'M', 'سخ'),
(0xFCB0, 'M', 'سم'),
(0xFCB1, 'M', 'صح'),
(0xFCB2, 'M', 'صخ'),
(0xFCB3, 'M', 'صم'),
(0xFCB4, 'M', 'ضج'),
(0xFCB5, 'M', 'ضح'),
(0xFCB6, 'M', 'ضخ'),
(0xFCB7, 'M', 'ضم'),
(0xFCB8, 'M', 'طح'),
(0xFCB9, 'M', 'ظم'),
(0xFCBA, 'M', 'عج'),
]

def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFCBB, 'M', 'عم'),
(0xFCBC, 'M', 'غج'),
(0xFCBD, 'M', 'غم'),
(0xFCBE, 'M', 'فج'),
(0xFCBF, 'M', 'فح'),
(0xFCC0, 'M', 'فخ'),
(0xFCC1, 'M', 'فم'),
(0xFCC2, 'M', 'قح'),
(0xFCC3, 'M', 'قم'),
(0xFCC4, 'M', 'كج'),
(0xFCC5, 'M', 'كح'),
(0xFCC6, 'M', 'كخ'),
(0xFCC7, 'M', 'كل'),
(0xFCC8, 'M', 'كم'),
(0xFCC9, 'M', 'لج'),
(0xFCCA, 'M', 'لح'),
(0xFCCB, 'M', 'لخ'),
(0xFCCC, 'M', 'لم'),
(0xFCCD, 'M', 'له'),
(0xFCCE, 'M', 'مج'),
(0xFCCF, 'M', 'مح'),
(0xFCD0, 'M', 'مخ'),
(0xFCD1, 'M', 'مم'),
(0xFCD2, 'M', 'نج'),
(0xFCD3, 'M', 'نح'),
(0xFCD4, 'M', 'نخ'),
(0xFCD5, 'M', 'نم'),
(0xFCD6, 'M', 'نه'),
(0xFCD7, 'M', 'هج'),
(0xFCD8, 'M', 'هم'),
(0xFCD9, 'M', 'هٰ'),
(0xFCDA, 'M', 'يج'),
(0xFCDB, 'M', 'يح'),
(0xFCDC, 'M', 'يخ'),
(0xFCDD, 'M', 'يم'),
(0xFCDE, 'M', 'يه'),
(0xFCDF, 'M', 'ئم'),
(0xFCE0, 'M', 'ئه'),
(0xFCE1, 'M', 'بم'),
(0xFCE2, 'M', 'به'),
(0xFCE3, 'M', 'تم'),
(0xFCE4, 'M', 'ته'),
(0xFCE5, 'M', 'ثم'),
(0xFCE6, 'M', 'ثه'),
(0xFCE7, 'M', 'سم'),
(0xFCE8, 'M', 'سه'),
(0xFCE9, 'M', 'شم'),
(0xFCEA, 'M', 'شه'),
(0xFCEB, 'M', 'كل'),
(0xFCEC, 'M', 'كم'),
(0xFCED, 'M', 'لم'),
(0xFCEE, 'M', 'نم'),
(0xFCEF, 'M', 'نه'),
(0xFCF0, 'M', 'يم'),
(0xFCF1, 'M', 'يه'),
(0xFCF2, 'M', 'ـَّ'),
(0xFCF3, 'M', 'ـُّ'),
(0xFCF4, 'M', 'ـِّ'),
(0xFCF5, 'M', 'طى'),
(0xFCF6, 'M', 'طي'),
(0xFCF7, 'M', 'عى'),
(0xFCF8, 'M', 'عي'),
(0xFCF9, 'M', 'غى'),
(0xFCFA, 'M', 'غي'),
(0xFCFB, 'M', 'سى'),
(0xFCFC, 'M', 'سي'),
(0xFCFD, 'M', 'شى'),
(0xFCFE, 'M', 'شي'),
(0xFCFF, 'M', 'حى'),
(0xFD00, 'M', 'حي'),
(0xFD01, 'M', 'جى'),
(0xFD02, 'M', 'جي'),
(0xFD03, 'M', 'خى'),
(0xFD04, 'M', 'خي'),
(0xFD05, 'M', 'صى'),
(0xFD06, 'M', 'صي'),
(0xFD07, 'M', 'ضى'),
(0xFD08, 'M', 'ضي'),
(0xFD09, 'M', 'شج'),
(0xFD0A, 'M', 'شح'),
(0xFD0B, 'M', 'شخ'),
(0xFD0C, 'M', 'شم'),
(0xFD0D, 'M', 'شر'),
(0xFD0E, 'M', 'سر'),
(0xFD0F, 'M', 'صر'),
(0xFD10, 'M', 'ضر'),
(0xFD11, 'M', 'طى'),
(0xFD12, 'M', 'طي'),
(0xFD13, 'M', 'عى'),
(0xFD14, 'M', 'عي'),
(0xFD15, 'M', 'غى'),
(0xFD16, 'M', 'غي'),
(0xFD17, 'M', 'سى'),
(0xFD18, 'M', 'سي'),
(0xFD19, 'M', 'شى'),
(0xFD1A, 'M', 'شي'),
(0xFD1B, 'M', 'حى'),
(0xFD1C, 'M', 'حي'),
(0xFD1D, 'M', 'جى'),
(0xFD1E, 'M', 'جي'),
]

def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFD1F, 'M', 'خى'),
(0xFD20, 'M', 'خي'),
(0xFD21, 'M', 'صى'),
(0xFD22, 'M', 'صي'),
(0xFD23, 'M', 'ضى'),
(0xFD24, 'M', 'ضي'),
(0xFD25, 'M', 'شج'),
(0xFD26, 'M', 'شح'),
(0xFD27, 'M', 'شخ'),
(0xFD28, 'M', 'شم'),
(0xFD29, 'M', 'شر'),
(0xFD2A, 'M', 'سر'),
(0xFD2B, 'M', 'صر'),
(0xFD2C, 'M', 'ضر'),
(0xFD2D, 'M', 'شج'),
(0xFD2E, 'M', 'شح'),
(0xFD2F, 'M', 'شخ'),
(0xFD30, 'M', 'شم'),
(0xFD31, 'M', 'سه'),
(0xFD32, 'M', 'شه'),
(0xFD33, 'M', 'طم'),
(0xFD34, 'M', 'سج'),
(0xFD35, 'M', 'سح'),
(0xFD36, 'M', 'سخ'),
(0xFD37, 'M', 'شج'),
(0xFD38, 'M', 'شح'),
(0xFD39, 'M', 'شخ'),
(0xFD3A, 'M', 'طم'),
(0xFD3B, 'M', 'ظم'),
(0xFD3C, 'M', 'اً'),
(0xFD3E, 'V'),
(0xFD50, 'M', 'تجم'),
(0xFD51, 'M', 'تحج'),
(0xFD53, 'M', 'تحم'),
(0xFD54, 'M', 'تخم'),
(0xFD55, 'M', 'تمج'),
(0xFD56, 'M', 'تمح'),
(0xFD57, 'M', 'تمخ'),
(0xFD58, 'M', 'جمح'),
(0xFD5A, 'M', 'حمي'),
(0xFD5B, 'M', 'حمى'),
(0xFD5C, 'M', 'سحج'),
(0xFD5D, 'M', 'سجح'),
(0xFD5E, 'M', 'سجى'),
(0xFD5F, 'M', 'سمح'),
(0xFD61, 'M', 'سمج'),
(0xFD62, 'M', 'سمم'),
(0xFD64, 'M', 'صحح'),
(0xFD66, 'M', 'صمم'),
(0xFD67, 'M', 'شحم'),
(0xFD69, 'M', 'شجي'),
(0xFD6A, 'M', 'شمخ'),
(0xFD6C, 'M', 'شمم'),
(0xFD6E, 'M', 'ضحى'),
(0xFD6F, 'M', 'ضخم'),
(0xFD71, 'M', 'طمح'),
(0xFD73, 'M', 'طمم'),
(0xFD74, 'M', 'طمي'),
(0xFD75, 'M', 'عجم'),
(0xFD76, 'M', 'عمم'),
(0xFD78, 'M', 'عمى'),
(0xFD79, 'M', 'غمم'),
(0xFD7A, 'M', 'غمي'),
(0xFD7B, 'M', 'غمى'),
(0xFD7C, 'M', 'فخم'),
(0xFD7E, 'M', 'قمح'),
(0xFD7F, 'M', 'قمم'),
(0xFD80, 'M', 'لحم'),
(0xFD81, 'M', 'لحي'),
(0xFD82, 'M', 'لحى'),
(0xFD83, 'M', 'لجج'),
(0xFD85, 'M', 'لخم'),
(0xFD87, 'M', 'لمح'),
(0xFD89, 'M', 'محج'),
(0xFD8A, 'M', 'محم'),
(0xFD8B, 'M', 'محي'),
(0xFD8C, 'M', 'مجح'),
(0xFD8D, 'M', 'مجم'),
(0xFD8E, 'M', 'مخج'),
(0xFD8F, 'M', 'مخم'),
(0xFD90, 'X'),
(0xFD92, 'M', 'مجخ'),
(0xFD93, 'M', 'همج'),
(0xFD94, 'M', 'همم'),
(0xFD95, 'M', 'نحم'),
(0xFD96, 'M', 'نحى'),
(0xFD97, 'M', 'نجم'),
(0xFD99, 'M', 'نجى'),
(0xFD9A, 'M', 'نمي'),
(0xFD9B, 'M', 'نمى'),
(0xFD9C, 'M', 'يمم'),
(0xFD9E, 'M', 'بخي'),
(0xFD9F, 'M', 'تجي'),
(0xFDA0, 'M', 'تجى'),
(0xFDA1, 'M', 'تخي'),
(0xFDA2, 'M', 'تخى'),
(0xFDA3, 'M', 'تمي'),
(0xFDA4, 'M', 'تمى'),
(0xFDA5, 'M', 'جمي'),
(0xFDA6, 'M', 'جحى'),
]

def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFDA7, 'M', 'جمى'),
(0xFDA8, 'M', 'سخى'),
(0xFDA9, 'M', 'صحي'),
(0xFDAA, 'M', 'شحي'),
(0xFDAB, 'M', 'ضحي'),
(0xFDAC, 'M', 'لجي'),
(0xFDAD, 'M', 'لمي'),
(0xFDAE, 'M', 'يحي'),
(0xFDAF, 'M', 'يجي'),
(0xFDB0, 'M', 'يمي'),
(0xFDB1, 'M', 'ممي'),
(0xFDB2, 'M', 'قمي'),
(0xFDB3, 'M', 'نحي'),
(0xFDB4, 'M', 'قمح'),
(0xFDB5, 'M', 'لحم'),
(0xFDB6, 'M', 'عمي'),
(0xFDB7, 'M', 'كمي'),
(0xFDB8, 'M', 'نجح'),
(0xFDB9, 'M', 'مخي'),
(0xFDBA, 'M', 'لجم'),
(0xFDBB, 'M', 'كمم'),
(0xFDBC, 'M', 'لجم'),
(0xFDBD, 'M', 'نجح'),
(0xFDBE, 'M', 'جحي'),
(0xFDBF, 'M', 'حجي'),
(0xFDC0, 'M', 'مجي'),
(0xFDC1, 'M', 'فمي'),
(0xFDC2, 'M', 'بحي'),
(0xFDC3, 'M', 'كمم'),
(0xFDC4, 'M', 'عجم'),
(0xFDC5, 'M', 'صمم'),
(0xFDC6, 'M', 'سخي'),
(0xFDC7, 'M', 'نجي'),
(0xFDC8, 'X'),
(0xFDCF, 'V'),
(0xFDD0, 'X'),
(0xFDF0, 'M', 'صلے'),
(0xFDF1, 'M', 'قلے'),
(0xFDF2, 'M', 'الله'),
(0xFDF3, 'M', 'اكبر'),
(0xFDF4, 'M', 'محمد'),
(0xFDF5, 'M', 'صلعم'),
(0xFDF6, 'M', 'رسول'),
(0xFDF7, 'M', 'عليه'),
(0xFDF8, 'M', 'وسلم'),
(0xFDF9, 'M', 'صلى'),
(0xFDFA, '3', 'صلى الله عليه وسلم'),
(0xFDFB, '3', 'جل جلاله'),
(0xFDFC, 'M', 'ریال'),
(0xFDFD, 'V'),
(0xFE00, 'I'),
(0xFE10, '3', ','),
(0xFE11, 'M', '、'),
(0xFE12, 'X'),
(0xFE13, '3', ':'),
(0xFE14, '3', ';'),
(0xFE15, '3', '!'),
(0xFE16, '3', '?'),
(0xFE17, 'M', '〖'),
(0xFE18, 'M', '〗'),
(0xFE19, 'X'),
(0xFE20, 'V'),
(0xFE30, 'X'),
(0xFE31, 'M', '—'),
(0xFE32, 'M', '–'),
(0xFE33, '3', '_'),
(0xFE35, '3', '('),
(0xFE36, '3', ')'),
(0xFE37, '3', '{'),
(0xFE38, '3', '}'),
(0xFE39, 'M', '〔'),
(0xFE3A, 'M', '〕'),
(0xFE3B, 'M', '【'),
(0xFE3C, 'M', '】'),
(0xFE3D, 'M', '《'),
(0xFE3E, 'M', '》'),
(0xFE3F, 'M', '〈'),
(0xFE40, 'M', '〉'),
(0xFE41, 'M', '「'),
(0xFE42, 'M', '」'),
(0xFE43, 'M', '『'),
(0xFE44, 'M', '』'),
(0xFE45, 'V'),
(0xFE47, '3', '['),
(0xFE48, '3', ']'),
(0xFE49, '3', ' ̅'),
(0xFE4D, '3', '_'),
(0xFE50, '3', ','),
(0xFE51, 'M', '、'),
(0xFE52, 'X'),
(0xFE54, '3', ';'),
(0xFE55, '3', ':'),
(0xFE56, '3', '?'),
(0xFE57, '3', '!'),
(0xFE58, 'M', '—'),
(0xFE59, '3', '('),
(0xFE5A, '3', ')'),
(0xFE5B, '3', '{'),
(0xFE5C, '3', '}'),
(0xFE5D, 'M', '〔'),
]

def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFE5E, 'M', '〕'),
(0xFE5F, '3', '#'),
(0xFE60, '3', '&'),
(0xFE61, '3', '*'),
(0xFE62, '3', '+'),
(0xFE63, 'M', '-'),
(0xFE64, '3', '<'),
(0xFE65, '3', '>'),
(0xFE66, '3', '='),
(0xFE67, 'X'),
(0xFE68, '3', '\\'),
(0xFE69, '3', '$'),
(0xFE6A, '3', '%'),
(0xFE6B, '3', '@'),
(0xFE6C, 'X'),
(0xFE70, '3', ' ً'),
(0xFE71, 'M', 'ـً'),
(0xFE72, '3', ' ٌ'),
(0xFE73, 'V'),
(0xFE74, '3', ' ٍ'),
(0xFE75, 'X'),
(0xFE76, '3', ' َ'),
(0xFE77, 'M', 'ـَ'),
(0xFE78, '3', ' ُ'),
(0xFE79, 'M', 'ـُ'),
(0xFE7A, '3', ' ِ'),
(0xFE7B, 'M', 'ـِ'),
(0xFE7C, '3', ' ّ'),
(0xFE7D, 'M', 'ـّ'),
(0xFE7E, '3', ' ْ'),
(0xFE7F, 'M', 'ـْ'),
(0xFE80, 'M', 'ء'),
(0xFE81, 'M', 'آ'),
(0xFE83, 'M', 'أ'),
(0xFE85, 'M', 'ؤ'),
(0xFE87, 'M', 'إ'),
(0xFE89, 'M', 'ئ'),
(0xFE8D, 'M', 'ا'),
(0xFE8F, 'M', 'ب'),
(0xFE93, 'M', 'ة'),
(0xFE95, 'M', 'ت'),
(0xFE99, 'M', 'ث'),
(0xFE9D, 'M', 'ج'),
(0xFEA1, 'M', 'ح'),
(0xFEA5, 'M', 'خ'),
(0xFEA9, 'M', 'د'),
(0xFEAB, 'M', 'ذ'),
(0xFEAD, 'M', 'ر'),
(0xFEAF, 'M', 'ز'),
(0xFEB1, 'M', 'س'),
(0xFEB5, 'M', 'ش'),
(0xFEB9, 'M', 'ص'),
(0xFEBD, 'M', 'ض'),
(0xFEC1, 'M', 'ط'),
(0xFEC5, 'M', 'ظ'),
(0xFEC9, 'M', 'ع'),
(0xFECD, 'M', 'غ'),
(0xFED1, 'M', 'ف'),
(0xFED5, 'M', 'ق'),
(0xFED9, 'M', 'ك'),
(0xFEDD, 'M', 'ل'),
(0xFEE1, 'M', 'م'),
(0xFEE5, 'M', 'ن'),
(0xFEE9, 'M', 'ه'),
(0xFEED, 'M', 'و'),
(0xFEEF, 'M', 'ى'),
(0xFEF1, 'M', 'ي'),
(0xFEF5, 'M', 'لآ'),
(0xFEF7, 'M', 'لأ'),
(0xFEF9, 'M', 'لإ'),
(0xFEFB, 'M', 'لا'),
(0xFEFD, 'X'),
(0xFEFF, 'I'),
(0xFF00, 'X'),
(0xFF01, '3', '!'),
(0xFF02, '3', '"'),
(0xFF03, '3', '#'),
(0xFF04, '3', '$'),
(0xFF05, '3', '%'),
(0xFF06, '3', '&'),
(0xFF07, '3', '\''),
(0xFF08, '3', '('),
(0xFF09, '3', ')'),
(0xFF0A, '3', '*'),
(0xFF0B, '3', '+'),
(0xFF0C, '3', ','),
(0xFF0D, 'M', '-'),
(0xFF0E, 'M', '.'),
(0xFF0F, '3', '/'),
(0xFF10, 'M', '0'),
(0xFF11, 'M', '1'),
(0xFF12, 'M', '2'),
(0xFF13, 'M', '3'),
(0xFF14, 'M', '4'),
(0xFF15, 'M', '5'),
(0xFF16, 'M', '6'),
(0xFF17, 'M', '7'),
(0xFF18, 'M', '8'),
(0xFF19, 'M', '9'),
(0xFF1A, '3', ':'),
]

def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFF1B, '3', ';'),
(0xFF1C, '3', '<'),
(0xFF1D, '3', '='),
(0xFF1E, '3', '>'),
(0xFF1F, '3', '?'),
(0xFF20, '3', '@'),
(0xFF21, 'M', 'a'),
(0xFF22, 'M', 'b'),
(0xFF23, 'M', 'c'),
(0xFF24, 'M', 'd'),
(0xFF25, 'M', 'e'),
(0xFF26, 'M', 'f'),
(0xFF27, 'M', 'g'),
(0xFF28, 'M', 'h'),
(0xFF29, 'M', 'i'),
(0xFF2A, 'M', 'j'),
(0xFF2B, 'M', 'k'),
(0xFF2C, 'M', 'l'),
(0xFF2D, 'M', 'm'),
(0xFF2E, 'M', 'n'),
(0xFF2F, 'M', 'o'),
(0xFF30, 'M', 'p'),
(0xFF31, 'M', 'q'),
(0xFF32, 'M', 'r'),
(0xFF33, 'M', 's'),
(0xFF34, 'M', 't'),
(0xFF35, 'M', 'u'),
(0xFF36, 'M', 'v'),
(0xFF37, 'M', 'w'),
(0xFF38, 'M', 'x'),
(0xFF39, 'M', 'y'),
(0xFF3A, 'M', 'z'),
(0xFF3B, '3', '['),
(0xFF3C, '3', '\\'),
(0xFF3D, '3', ']'),
(0xFF3E, '3', '^'),
(0xFF3F, '3', '_'),
(0xFF40, '3', '`'),
(0xFF41, 'M', 'a'),
(0xFF42, 'M', 'b'),
(0xFF43, 'M', 'c'),
(0xFF44, 'M', 'd'),
(0xFF45, 'M', 'e'),
(0xFF46, 'M', 'f'),
(0xFF47, 'M', 'g'),
(0xFF48, 'M', 'h'),
(0xFF49, 'M', 'i'),
(0xFF4A, 'M', 'j'),
(0xFF4B, 'M', 'k'),
(0xFF4C, 'M', 'l'),
(0xFF4D, 'M', 'm'),
(0xFF4E, 'M', 'n'),
(0xFF4F, 'M', 'o'),
(0xFF50, 'M', 'p'),
(0xFF51, 'M', 'q'),
(0xFF52, 'M', 'r'),
(0xFF53, 'M', 's'),
(0xFF54, 'M', 't'),
(0xFF55, 'M', 'u'),
(0xFF56, 'M', 'v'),
(0xFF57, 'M', 'w'),
(0xFF58, 'M', 'x'),
(0xFF59, 'M', 'y'),
(0xFF5A, 'M', 'z'),
(0xFF5B, '3', '{'),
(0xFF5C, '3', '|'),
(0xFF5D, '3', '}'),
(0xFF5E, '3', '~'),
(0xFF5F, 'M', '⦅'),
(0xFF60, 'M', '⦆'),
(0xFF61, 'M', '.'),
(0xFF62, 'M', '「'),
(0xFF63, 'M', '」'),
(0xFF64, 'M', '、'),
(0xFF65, 'M', '・'),
(0xFF66, 'M', 'ヲ'),
(0xFF67, 'M', 'ァ'),
(0xFF68, 'M', 'ィ'),
(0xFF69, 'M', 'ゥ'),
(0xFF6A, 'M', 'ェ'),
(0xFF6B, 'M', 'ォ'),
(0xFF6C, 'M', 'ャ'),
(0xFF6D, 'M', 'ュ'),
(0xFF6E, 'M', 'ョ'),
(0xFF6F, 'M', 'ッ'),
(0xFF70, 'M', 'ー'),
(0xFF71, 'M', 'ア'),
(0xFF72, 'M', 'イ'),
(0xFF73, 'M', 'ウ'),
(0xFF74, 'M', 'エ'),
(0xFF75, 'M', 'オ'),
(0xFF76, 'M', 'カ'),
(0xFF77, 'M', 'キ'),
(0xFF78, 'M', 'ク'),
(0xFF79, 'M', 'ケ'),
(0xFF7A, 'M', 'コ'),
(0xFF7B, 'M', 'サ'),
(0xFF7C, 'M', 'シ'),
(0xFF7D, 'M', 'ス'),
(0xFF7E, 'M', 'セ'),
]

def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFF7F, 'M', 'ソ'),
(0xFF80, 'M', 'タ'),
(0xFF81, 'M', 'チ'),
(0xFF82, 'M', 'ツ'),
(0xFF83, 'M', 'テ'),
(0xFF84, 'M', 'ト'),
(0xFF85, 'M', 'ナ'),
(0xFF86, 'M', 'ニ'),
(0xFF87, 'M', 'ヌ'),
(0xFF88, 'M', 'ネ'),
(0xFF89, 'M', 'ノ'),
(0xFF8A, 'M', 'ハ'),
(0xFF8B, 'M', 'ヒ'),
(0xFF8C, 'M', 'フ'),
(0xFF8D, 'M', 'ヘ'),
(0xFF8E, 'M', 'ホ'),
(0xFF8F, 'M', 'マ'),
(0xFF90, 'M', 'ミ'),
(0xFF91, 'M', 'ム'),
(0xFF92, 'M', 'メ'),
(0xFF93, 'M', 'モ'),
(0xFF94, 'M', 'ヤ'),
(0xFF95, 'M', 'ユ'),
(0xFF96, 'M', 'ヨ'),
(0xFF97, 'M', 'ラ'),
(0xFF98, 'M', 'リ'),
(0xFF99, 'M', 'ル'),
(0xFF9A, 'M', 'レ'),
(0xFF9B, 'M', 'ロ'),
(0xFF9C, 'M', 'ワ'),
(0xFF9D, 'M', 'ン'),
(0xFF9E, 'M', '゙'),
(0xFF9F, 'M', '゚'),
(0xFFA0, 'X'),
(0xFFA1, 'M', 'ᄀ'),
(0xFFA2, 'M', 'ᄁ'),
(0xFFA3, 'M', 'ᆪ'),
(0xFFA4, 'M', 'ᄂ'),
(0xFFA5, 'M', 'ᆬ'),
(0xFFA6, 'M', 'ᆭ'),
(0xFFA7, 'M', 'ᄃ'),
(0xFFA8, 'M', 'ᄄ'),
(0xFFA9, 'M', 'ᄅ'),
(0xFFAA, 'M', 'ᆰ'),
(0xFFAB, 'M', 'ᆱ'),
(0xFFAC, 'M', 'ᆲ'),
(0xFFAD, 'M', 'ᆳ'),
(0xFFAE, 'M', 'ᆴ'),
(0xFFAF, 'M', 'ᆵ'),
(0xFFB0, 'M', 'ᄚ'),
(0xFFB1, 'M', 'ᄆ'),
(0xFFB2, 'M', 'ᄇ'),
(0xFFB3, 'M', 'ᄈ'),
(0xFFB4, 'M', 'ᄡ'),
(0xFFB5, 'M', 'ᄉ'),
(0xFFB6, 'M', 'ᄊ'),
(0xFFB7, 'M', 'ᄋ'),
(0xFFB8, 'M', 'ᄌ'),
(0xFFB9, 'M', 'ᄍ'),
(0xFFBA, 'M', 'ᄎ'),
(0xFFBB, 'M', 'ᄏ'),
(0xFFBC, 'M', 'ᄐ'),
(0xFFBD, 'M', 'ᄑ'),
(0xFFBE, 'M', 'ᄒ'),
(0xFFBF, 'X'),
(0xFFC2, 'M', 'ᅡ'),
(0xFFC3, 'M', 'ᅢ'),
(0xFFC4, 'M', 'ᅣ'),
(0xFFC5, 'M', 'ᅤ'),
(0xFFC6, 'M', 'ᅥ'),
(0xFFC7, 'M', 'ᅦ'),
(0xFFC8, 'X'),
(0xFFCA, 'M', 'ᅧ'),
(0xFFCB, 'M', 'ᅨ'),
(0xFFCC, 'M', 'ᅩ'),
(0xFFCD, 'M', 'ᅪ'),
(0xFFCE, 'M', 'ᅫ'),
(0xFFCF, 'M', 'ᅬ'),
(0xFFD0, 'X'),
(0xFFD2, 'M', 'ᅭ'),
(0xFFD3, 'M', 'ᅮ'),
(0xFFD4, 'M', 'ᅯ'),
(0xFFD5, 'M', 'ᅰ'),
(0xFFD6, 'M', 'ᅱ'),
(0xFFD7, 'M', 'ᅲ'),
(0xFFD8, 'X'),
(0xFFDA, 'M', 'ᅳ'),
(0xFFDB, 'M', 'ᅴ'),
(0xFFDC, 'M', 'ᅵ'),
(0xFFDD, 'X'),
(0xFFE0, 'M', '¢'),
(0xFFE1, 'M', '£'),
(0xFFE2, 'M', '¬'),
(0xFFE3, '3', ' ̄'),
(0xFFE4, 'M', '¦'),
(0xFFE5, 'M', '¥'),
(0xFFE6, 'M', '₩'),
(0xFFE7, 'X'),
(0xFFE8, 'M', '│'),
(0xFFE9, 'M', '←'),
]

def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0xFFEA, 'M', '↑'),
(0xFFEB, 'M', '→'),
(0xFFEC, 'M', '↓'),
(0xFFED, 'M', '■'),
(0xFFEE, 'M', '○'),
(0xFFEF, 'X'),
(0x10000, 'V'),
(0x1000C, 'X'),
(0x1000D, 'V'),
(0x10027, 'X'),
(0x10028, 'V'),
(0x1003B, 'X'),
(0x1003C, 'V'),
(0x1003E, 'X'),
(0x1003F, 'V'),
(0x1004E, 'X'),
(0x10050, 'V'),
(0x1005E, 'X'),
(0x10080, 'V'),
(0x100FB, 'X'),
(0x10100, 'V'),
(0x10103, 'X'),
(0x10107, 'V'),
(0x10134, 'X'),
(0x10137, 'V'),
(0x1018F, 'X'),
(0x10190, 'V'),
(0x1019D, 'X'),
(0x101A0, 'V'),
(0x101A1, 'X'),
(0x101D0, 'V'),
(0x101FE, 'X'),
(0x10280, 'V'),
(0x1029D, 'X'),
(0x102A0, 'V'),
(0x102D1, 'X'),
(0x102E0, 'V'),
(0x102FC, 'X'),
(0x10300, 'V'),
(0x10324, 'X'),
(0x1032D, 'V'),
(0x1034B, 'X'),
(0x10350, 'V'),
(0x1037B, 'X'),
(0x10380, 'V'),
(0x1039E, 'X'),
(0x1039F, 'V'),
(0x103C4, 'X'),
(0x103C8, 'V'),
(0x103D6, 'X'),
(0x10400, 'M', '𐐨'),
(0x10401, 'M', '𐐩'),
(0x10402, 'M', '𐐪'),
(0x10403, 'M', '𐐫'),
(0x10404, 'M', '𐐬'),
(0x10405, 'M', '𐐭'),
(0x10406, 'M', '𐐮'),
(0x10407, 'M', '𐐯'),
(0x10408, 'M', '𐐰'),
(0x10409, 'M', '𐐱'),
(0x1040A, 'M', '𐐲'),
(0x1040B, 'M', '𐐳'),
(0x1040C, 'M', '𐐴'),
(0x1040D, 'M', '𐐵'),
(0x1040E, 'M', '𐐶'),
(0x1040F, 'M', '𐐷'),
(0x10410, 'M', '𐐸'),
(0x10411, 'M', '𐐹'),
(0x10412, 'M', '𐐺'),
(0x10413, 'M', '𐐻'),
(0x10414, 'M', '𐐼'),
(0x10415, 'M', '𐐽'),
(0x10416, 'M', '𐐾'),
(0x10417, 'M', '𐐿'),
(0x10418, 'M', '𐑀'),
(0x10419, 'M', '𐑁'),
(0x1041A, 'M', '𐑂'),
(0x1041B, 'M', '𐑃'),
(0x1041C, 'M', '𐑄'),
(0x1041D, 'M', '𐑅'),
(0x1041E, 'M', '𐑆'),
(0x1041F, 'M', '𐑇'),
(0x10420, 'M', '𐑈'),
(0x10421, 'M', '𐑉'),
(0x10422, 'M', '𐑊'),
(0x10423, 'M', '𐑋'),
(0x10424, 'M', '𐑌'),
(0x10425, 'M', '𐑍'),
(0x10426, 'M', '𐑎'),
(0x10427, 'M', '𐑏'),
(0x10428, 'V'),
(0x1049E, 'X'),
(0x104A0, 'V'),
(0x104AA, 'X'),
(0x104B0, 'M', '𐓘'),
(0x104B1, 'M', '𐓙'),
(0x104B2, 'M', '𐓚'),
(0x104B3, 'M', '𐓛'),
(0x104B4, 'M', '𐓜'),
(0x104B5, 'M', '𐓝'),
]

def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x104B6, 'M', '𐓞'),
(0x104B7, 'M', '𐓟'),
(0x104B8, 'M', '𐓠'),
(0x104B9, 'M', '𐓡'),
(0x104BA, 'M', '𐓢'),
(0x104BB, 'M', '𐓣'),
(0x104BC, 'M', '𐓤'),
(0x104BD, 'M', '𐓥'),
(0x104BE, 'M', '𐓦'),
(0x104BF, 'M', '𐓧'),
(0x104C0, 'M', '𐓨'),
(0x104C1, 'M', '𐓩'),
(0x104C2, 'M', '𐓪'),
(0x104C3, 'M', '𐓫'),
(0x104C4, 'M', '𐓬'),
(0x104C5, 'M', '𐓭'),
(0x104C6, 'M', '𐓮'),
(0x104C7, 'M', '𐓯'),
(0x104C8, 'M', '𐓰'),
(0x104C9, 'M', '𐓱'),
(0x104CA, 'M', '𐓲'),
(0x104CB, 'M', '𐓳'),
(0x104CC, 'M', '𐓴'),
(0x104CD, 'M', '𐓵'),
(0x104CE, 'M', '𐓶'),
(0x104CF, 'M', '𐓷'),
(0x104D0, 'M', '𐓸'),
(0x104D1, 'M', '𐓹'),
(0x104D2, 'M', '𐓺'),
(0x104D3, 'M', '𐓻'),
(0x104D4, 'X'),
(0x104D8, 'V'),
(0x104FC, 'X'),
(0x10500, 'V'),
(0x10528, 'X'),
(0x10530, 'V'),
(0x10564, 'X'),
(0x1056F, 'V'),
(0x10570, 'M', '𐖗'),
(0x10571, 'M', '𐖘'),
(0x10572, 'M', '𐖙'),
(0x10573, 'M', '𐖚'),
(0x10574, 'M', '𐖛'),
(0x10575, 'M', '𐖜'),
(0x10576, 'M', '𐖝'),
(0x10577, 'M', '𐖞'),
(0x10578, 'M', '𐖟'),
(0x10579, 'M', '𐖠'),
(0x1057A, 'M', '𐖡'),
(0x1057B, 'X'),
(0x1057C, 'M', '𐖣'),
(0x1057D, 'M', '𐖤'),
(0x1057E, 'M', '𐖥'),
(0x1057F, 'M', '𐖦'),
(0x10580, 'M', '𐖧'),
(0x10581, 'M', '𐖨'),
(0x10582, 'M', '𐖩'),
(0x10583, 'M', '𐖪'),
(0x10584, 'M', '𐖫'),
(0x10585, 'M', '𐖬'),
(0x10586, 'M', '𐖭'),
(0x10587, 'M', '𐖮'),
(0x10588, 'M', '𐖯'),
(0x10589, 'M', '𐖰'),
(0x1058A, 'M', '𐖱'),
(0x1058B, 'X'),
(0x1058C, 'M', '𐖳'),
(0x1058D, 'M', '𐖴'),
(0x1058E, 'M', '𐖵'),
(0x1058F, 'M', '𐖶'),
(0x10590, 'M', '𐖷'),
(0x10591, 'M', '𐖸'),
(0x10592, 'M', '𐖹'),
(0x10593, 'X'),
(0x10594, 'M', '𐖻'),
(0x10595, 'M', '𐖼'),
(0x10596, 'X'),
(0x10597, 'V'),
(0x105A2, 'X'),
(0x105A3, 'V'),
(0x105B2, 'X'),
(0x105B3, 'V'),
(0x105BA, 'X'),
(0x105BB, 'V'),
(0x105BD, 'X'),
(0x10600, 'V'),
(0x10737, 'X'),
(0x10740, 'V'),
(0x10756, 'X'),
(0x10760, 'V'),
(0x10768, 'X'),
(0x10780, 'V'),
(0x10781, 'M', 'ː'),
(0x10782, 'M', 'ˑ'),
(0x10783, 'M', 'æ'),
(0x10784, 'M', 'ʙ'),
(0x10785, 'M', 'ɓ'),
(0x10786, 'X'),
(0x10787, 'M', 'ʣ'),
(0x10788, 'M', 'ꭦ'),
]

def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x10789, 'M', 'ʥ'),
(0x1078A, 'M', 'ʤ'),
(0x1078B, 'M', 'ɖ'),
(0x1078C, 'M', 'ɗ'),
(0x1078D, 'M', 'ᶑ'),
(0x1078E, 'M', 'ɘ'),
(0x1078F, 'M', 'ɞ'),
(0x10790, 'M', 'ʩ'),
(0x10791, 'M', 'ɤ'),
(0x10792, 'M', 'ɢ'),
(0x10793, 'M', 'ɠ'),
(0x10794, 'M', 'ʛ'),
(0x10795, 'M', 'ħ'),
(0x10796, 'M', 'ʜ'),
(0x10797, 'M', 'ɧ'),
(0x10798, 'M', 'ʄ'),
(0x10799, 'M', 'ʪ'),
(0x1079A, 'M', 'ʫ'),
(0x1079B, 'M', 'ɬ'),
(0x1079C, 'M', '𝼄'),
(0x1079D, 'M', 'ꞎ'),
(0x1079E, 'M', 'ɮ'),
(0x1079F, 'M', '𝼅'),
(0x107A0, 'M', 'ʎ'),
(0x107A1, 'M', '𝼆'),
(0x107A2, 'M', 'ø'),
(0x107A3, 'M', 'ɶ'),
(0x107A4, 'M', 'ɷ'),
(0x107A5, 'M', 'q'),
(0x107A6, 'M', 'ɺ'),
(0x107A7, 'M', '𝼈'),
(0x107A8, 'M', 'ɽ'),
(0x107A9, 'M', 'ɾ'),
(0x107AA, 'M', 'ʀ'),
(0x107AB, 'M', 'ʨ'),
(0x107AC, 'M', 'ʦ'),
(0x107AD, 'M', 'ꭧ'),
(0x107AE, 'M', 'ʧ'),
(0x107AF, 'M', 'ʈ'),
(0x107B0, 'M', 'ⱱ'),
(0x107B1, 'X'),
(0x107B2, 'M', 'ʏ'),
(0x107B3, 'M', 'ʡ'),
(0x107B4, 'M', 'ʢ'),
(0x107B5, 'M', 'ʘ'),
(0x107B6, 'M', 'ǀ'),
(0x107B7, 'M', 'ǁ'),
(0x107B8, 'M', 'ǂ'),
(0x107B9, 'M', '𝼊'),
(0x107BA, 'M', '𝼞'),
(0x107BB, 'X'),
(0x10800, 'V'),
(0x10806, 'X'),
(0x10808, 'V'),
(0x10809, 'X'),
(0x1080A, 'V'),
(0x10836, 'X'),
(0x10837, 'V'),
(0x10839, 'X'),
(0x1083C, 'V'),
(0x1083D, 'X'),
(0x1083F, 'V'),
(0x10856, 'X'),
(0x10857, 'V'),
(0x1089F, 'X'),
(0x108A7, 'V'),
(0x108B0, 'X'),
(0x108E0, 'V'),
(0x108F3, 'X'),
(0x108F4, 'V'),
(0x108F6, 'X'),
(0x108FB, 'V'),
(0x1091C, 'X'),
(0x1091F, 'V'),
(0x1093A, 'X'),
(0x1093F, 'V'),
(0x10940, 'X'),
(0x10980, 'V'),
(0x109B8, 'X'),
(0x109BC, 'V'),
(0x109D0, 'X'),
(0x109D2, 'V'),
(0x10A04, 'X'),
(0x10A05, 'V'),
(0x10A07, 'X'),
(0x10A0C, 'V'),
(0x10A14, 'X'),
(0x10A15, 'V'),
(0x10A18, 'X'),
(0x10A19, 'V'),
(0x10A36, 'X'),
(0x10A38, 'V'),
(0x10A3B, 'X'),
(0x10A3F, 'V'),
(0x10A49, 'X'),
(0x10A50, 'V'),
(0x10A59, 'X'),
(0x10A60, 'V'),
(0x10AA0, 'X'),
(0x10AC0, 'V'),
]
def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x10AE7, 'X'),
(0x10AEB, 'V'),
(0x10AF7, 'X'),
(0x10B00, 'V'),
(0x10B36, 'X'),
(0x10B39, 'V'),
(0x10B56, 'X'),
(0x10B58, 'V'),
(0x10B73, 'X'),
(0x10B78, 'V'),
(0x10B92, 'X'),
(0x10B99, 'V'),
(0x10B9D, 'X'),
(0x10BA9, 'V'),
(0x10BB0, 'X'),
(0x10C00, 'V'),
(0x10C49, 'X'),
(0x10C80, 'M', '𐳀'),
(0x10C81, 'M', '𐳁'),
(0x10C82, 'M', '𐳂'),
(0x10C83, 'M', '𐳃'),
(0x10C84, 'M', '𐳄'),
(0x10C85, 'M', '𐳅'),
(0x10C86, 'M', '𐳆'),
(0x10C87, 'M', '𐳇'),
(0x10C88, 'M', '𐳈'),
(0x10C89, 'M', '𐳉'),
(0x10C8A, 'M', '𐳊'),
(0x10C8B, 'M', '𐳋'),
(0x10C8C, 'M', '𐳌'),
(0x10C8D, 'M', '𐳍'),
(0x10C8E, 'M', '𐳎'),
(0x10C8F, 'M', '𐳏'),
(0x10C90, 'M', '𐳐'),
(0x10C91, 'M', '𐳑'),
(0x10C92, 'M', '𐳒'),
(0x10C93, 'M', '𐳓'),
(0x10C94, 'M', '𐳔'),
(0x10C95, 'M', '𐳕'),
(0x10C96, 'M', '𐳖'),
(0x10C97, 'M', '𐳗'),
(0x10C98, 'M', '𐳘'),
(0x10C99, 'M', '𐳙'),
(0x10C9A, 'M', '𐳚'),
(0x10C9B, 'M', '𐳛'),
(0x10C9C, 'M', '𐳜'),
(0x10C9D, 'M', '𐳝'),
(0x10C9E, 'M', '𐳞'),
(0x10C9F, 'M', '𐳟'),
(0x10CA0, 'M', '𐳠'),
(0x10CA1, 'M', '𐳡'),
(0x10CA2, 'M', '𐳢'),
(0x10CA3, 'M', '𐳣'),
(0x10CA4, 'M', '𐳤'),
(0x10CA5, 'M', '𐳥'),
(0x10CA6, 'M', '𐳦'),
(0x10CA7, 'M', '𐳧'),
(0x10CA8, 'M', '𐳨'),
(0x10CA9, 'M', '𐳩'),
(0x10CAA, 'M', '𐳪'),
(0x10CAB, 'M', '𐳫'),
(0x10CAC, 'M', '𐳬'),
(0x10CAD, 'M', '𐳭'),
(0x10CAE, 'M', '𐳮'),
(0x10CAF, 'M', '𐳯'),
(0x10CB0, 'M', '𐳰'),
(0x10CB1, 'M', '𐳱'),
(0x10CB2, 'M', '𐳲'),
(0x10CB3, 'X'),
(0x10CC0, 'V'),
(0x10CF3, 'X'),
(0x10CFA, 'V'),
(0x10D28, 'X'),
(0x10D30, 'V'),
(0x10D3A, 'X'),
(0x10E60, 'V'),
(0x10E7F, 'X'),
(0x10E80, 'V'),
(0x10EAA, 'X'),
(0x10EAB, 'V'),
(0x10EAE, 'X'),
(0x10EB0, 'V'),
(0x10EB2, 'X'),
(0x10EFD, 'V'),
(0x10F28, 'X'),
(0x10F30, 'V'),
(0x10F5A, 'X'),
(0x10F70, 'V'),
(0x10F8A, 'X'),
(0x10FB0, 'V'),
(0x10FCC, 'X'),
(0x10FE0, 'V'),
(0x10FF7, 'X'),
(0x11000, 'V'),
(0x1104E, 'X'),
(0x11052, 'V'),
(0x11076, 'X'),
(0x1107F, 'V'),
(0x110BD, 'X'),
(0x110BE, 'V'),
]
def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x110C3, 'X'),
(0x110D0, 'V'),
(0x110E9, 'X'),
(0x110F0, 'V'),
(0x110FA, 'X'),
(0x11100, 'V'),
(0x11135, 'X'),
(0x11136, 'V'),
(0x11148, 'X'),
(0x11150, 'V'),
(0x11177, 'X'),
(0x11180, 'V'),
(0x111E0, 'X'),
(0x111E1, 'V'),
(0x111F5, 'X'),
(0x11200, 'V'),
(0x11212, 'X'),
(0x11213, 'V'),
(0x11242, 'X'),
(0x11280, 'V'),
(0x11287, 'X'),
(0x11288, 'V'),
(0x11289, 'X'),
(0x1128A, 'V'),
(0x1128E, 'X'),
(0x1128F, 'V'),
(0x1129E, 'X'),
(0x1129F, 'V'),
(0x112AA, 'X'),
(0x112B0, 'V'),
(0x112EB, 'X'),
(0x112F0, 'V'),
(0x112FA, 'X'),
(0x11300, 'V'),
(0x11304, 'X'),
(0x11305, 'V'),
(0x1130D, 'X'),
(0x1130F, 'V'),
(0x11311, 'X'),
(0x11313, 'V'),
(0x11329, 'X'),
(0x1132A, 'V'),
(0x11331, 'X'),
(0x11332, 'V'),
(0x11334, 'X'),
(0x11335, 'V'),
(0x1133A, 'X'),
(0x1133B, 'V'),
(0x11345, 'X'),
(0x11347, 'V'),
(0x11349, 'X'),
(0x1134B, 'V'),
(0x1134E, 'X'),
(0x11350, 'V'),
(0x11351, 'X'),
(0x11357, 'V'),
(0x11358, 'X'),
(0x1135D, 'V'),
(0x11364, 'X'),
(0x11366, 'V'),
(0x1136D, 'X'),
(0x11370, 'V'),
(0x11375, 'X'),
(0x11400, 'V'),
(0x1145C, 'X'),
(0x1145D, 'V'),
(0x11462, 'X'),
(0x11480, 'V'),
(0x114C8, 'X'),
(0x114D0, 'V'),
(0x114DA, 'X'),
(0x11580, 'V'),
(0x115B6, 'X'),
(0x115B8, 'V'),
(0x115DE, 'X'),
(0x11600, 'V'),
(0x11645, 'X'),
(0x11650, 'V'),
(0x1165A, 'X'),
(0x11660, 'V'),
(0x1166D, 'X'),
(0x11680, 'V'),
(0x116BA, 'X'),
(0x116C0, 'V'),
(0x116CA, 'X'),
(0x11700, 'V'),
(0x1171B, 'X'),
(0x1171D, 'V'),
(0x1172C, 'X'),
(0x11730, 'V'),
(0x11747, 'X'),
(0x11800, 'V'),
(0x1183C, 'X'),
(0x118A0, 'M', '𑣀'),
(0x118A1, 'M', '𑣁'),
(0x118A2, 'M', '𑣂'),
(0x118A3, 'M', '𑣃'),
(0x118A4, 'M', '𑣄'),
(0x118A5, 'M', '𑣅'),
(0x118A6, 'M', '𑣆'),
]
def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x118A7, 'M', '𑣇'),
(0x118A8, 'M', '𑣈'),
(0x118A9, 'M', '𑣉'),
(0x118AA, 'M', '𑣊'),
(0x118AB, 'M', '𑣋'),
(0x118AC, 'M', '𑣌'),
(0x118AD, 'M', '𑣍'),
(0x118AE, 'M', '𑣎'),
(0x118AF, 'M', '𑣏'),
(0x118B0, 'M', '𑣐'),
(0x118B1, 'M', '𑣑'),
(0x118B2, 'M', '𑣒'),
(0x118B3, 'M', '𑣓'),
(0x118B4, 'M', '𑣔'),
(0x118B5, 'M', '𑣕'),
(0x118B6, 'M', '𑣖'),
(0x118B7, 'M', '𑣗'),
(0x118B8, 'M', '𑣘'),
(0x118B9, 'M', '𑣙'),
(0x118BA, 'M', '𑣚'),
(0x118BB, 'M', '𑣛'),
(0x118BC, 'M', '𑣜'),
(0x118BD, 'M', '𑣝'),
(0x118BE, 'M', '𑣞'),
(0x118BF, 'M', '𑣟'),
(0x118C0, 'V'),
(0x118F3, 'X'),
(0x118FF, 'V'),
(0x11907, 'X'),
(0x11909, 'V'),
(0x1190A, 'X'),
(0x1190C, 'V'),
(0x11914, 'X'),
(0x11915, 'V'),
(0x11917, 'X'),
(0x11918, 'V'),
(0x11936, 'X'),
(0x11937, 'V'),
(0x11939, 'X'),
(0x1193B, 'V'),
(0x11947, 'X'),
(0x11950, 'V'),
(0x1195A, 'X'),
(0x119A0, 'V'),
(0x119A8, 'X'),
(0x119AA, 'V'),
(0x119D8, 'X'),
(0x119DA, 'V'),
(0x119E5, 'X'),
(0x11A00, 'V'),
(0x11A48, 'X'),
(0x11A50, 'V'),
(0x11AA3, 'X'),
(0x11AB0, 'V'),
(0x11AF9, 'X'),
(0x11B00, 'V'),
(0x11B0A, 'X'),
(0x11C00, 'V'),
(0x11C09, 'X'),
(0x11C0A, 'V'),
(0x11C37, 'X'),
(0x11C38, 'V'),
(0x11C46, 'X'),
(0x11C50, 'V'),
(0x11C6D, 'X'),
(0x11C70, 'V'),
(0x11C90, 'X'),
(0x11C92, 'V'),
(0x11CA8, 'X'),
(0x11CA9, 'V'),
(0x11CB7, 'X'),
(0x11D00, 'V'),
(0x11D07, 'X'),
(0x11D08, 'V'),
(0x11D0A, 'X'),
(0x11D0B, 'V'),
(0x11D37, 'X'),
(0x11D3A, 'V'),
(0x11D3B, 'X'),
(0x11D3C, 'V'),
(0x11D3E, 'X'),
(0x11D3F, 'V'),
(0x11D48, 'X'),
(0x11D50, 'V'),
(0x11D5A, 'X'),
(0x11D60, 'V'),
(0x11D66, 'X'),
(0x11D67, 'V'),
(0x11D69, 'X'),
(0x11D6A, 'V'),
(0x11D8F, 'X'),
(0x11D90, 'V'),
(0x11D92, 'X'),
(0x11D93, 'V'),
(0x11D99, 'X'),
(0x11DA0, 'V'),
(0x11DAA, 'X'),
(0x11EE0, 'V'),
(0x11EF9, 'X'),
(0x11F00, 'V'),
]
def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x11F11, 'X'),
(0x11F12, 'V'),
(0x11F3B, 'X'),
(0x11F3E, 'V'),
(0x11F5A, 'X'),
(0x11FB0, 'V'),
(0x11FB1, 'X'),
(0x11FC0, 'V'),
(0x11FF2, 'X'),
(0x11FFF, 'V'),
(0x1239A, 'X'),
(0x12400, 'V'),
(0x1246F, 'X'),
(0x12470, 'V'),
(0x12475, 'X'),
(0x12480, 'V'),
(0x12544, 'X'),
(0x12F90, 'V'),
(0x12FF3, 'X'),
(0x13000, 'V'),
(0x13430, 'X'),
(0x13440, 'V'),
(0x13456, 'X'),
(0x14400, 'V'),
(0x14647, 'X'),
(0x16800, 'V'),
(0x16A39, 'X'),
(0x16A40, 'V'),
(0x16A5F, 'X'),
(0x16A60, 'V'),
(0x16A6A, 'X'),
(0x16A6E, 'V'),
(0x16ABF, 'X'),
(0x16AC0, 'V'),
(0x16ACA, 'X'),
(0x16AD0, 'V'),
(0x16AEE, 'X'),
(0x16AF0, 'V'),
(0x16AF6, 'X'),
(0x16B00, 'V'),
(0x16B46, 'X'),
(0x16B50, 'V'),
(0x16B5A, 'X'),
(0x16B5B, 'V'),
(0x16B62, 'X'),
(0x16B63, 'V'),
(0x16B78, 'X'),
(0x16B7D, 'V'),
(0x16B90, 'X'),
(0x16E40, 'M', '𖹠'),
(0x16E41, 'M', '𖹡'),
(0x16E42, 'M', '𖹢'),
(0x16E43, 'M', '𖹣'),
(0x16E44, 'M', '𖹤'),
(0x16E45, 'M', '𖹥'),
(0x16E46, 'M', '𖹦'),
(0x16E47, 'M', '𖹧'),
(0x16E48, 'M', '𖹨'),
(0x16E49, 'M', '𖹩'),
(0x16E4A, 'M', '𖹪'),
(0x16E4B, 'M', '𖹫'),
(0x16E4C, 'M', '𖹬'),
(0x16E4D, 'M', '𖹭'),
(0x16E4E, 'M', '𖹮'),
(0x16E4F, 'M', '𖹯'),
(0x16E50, 'M', '𖹰'),
(0x16E51, 'M', '𖹱'),
(0x16E52, 'M', '𖹲'),
(0x16E53, 'M', '𖹳'),
(0x16E54, 'M', '𖹴'),
(0x16E55, 'M', '𖹵'),
(0x16E56, 'M', '𖹶'),
(0x16E57, 'M', '𖹷'),
(0x16E58, 'M', '𖹸'),
(0x16E59, 'M', '𖹹'),
(0x16E5A, 'M', '𖹺'),
(0x16E5B, 'M', '𖹻'),
(0x16E5C, 'M', '𖹼'),
(0x16E5D, 'M', '𖹽'),
(0x16E5E, 'M', '𖹾'),
(0x16E5F, 'M', '𖹿'),
(0x16E60, 'V'),
(0x16E9B, 'X'),
(0x16F00, 'V'),
(0x16F4B, 'X'),
(0x16F4F, 'V'),
(0x16F88, 'X'),
(0x16F8F, 'V'),
(0x16FA0, 'X'),
(0x16FE0, 'V'),
(0x16FE5, 'X'),
(0x16FF0, 'V'),
(0x16FF2, 'X'),
(0x17000, 'V'),
(0x187F8, 'X'),
(0x18800, 'V'),
(0x18CD6, 'X'),
(0x18D00, 'V'),
(0x18D09, 'X'),
(0x1AFF0, 'V'),
]
def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1AFF4, 'X'),
(0x1AFF5, 'V'),
(0x1AFFC, 'X'),
(0x1AFFD, 'V'),
(0x1AFFF, 'X'),
(0x1B000, 'V'),
(0x1B123, 'X'),
(0x1B132, 'V'),
(0x1B133, 'X'),
(0x1B150, 'V'),
(0x1B153, 'X'),
(0x1B155, 'V'),
(0x1B156, 'X'),
(0x1B164, 'V'),
(0x1B168, 'X'),
(0x1B170, 'V'),
(0x1B2FC, 'X'),
(0x1BC00, 'V'),
(0x1BC6B, 'X'),
(0x1BC70, 'V'),
(0x1BC7D, 'X'),
(0x1BC80, 'V'),
(0x1BC89, 'X'),
(0x1BC90, 'V'),
(0x1BC9A, 'X'),
(0x1BC9C, 'V'),
(0x1BCA0, 'I'),
(0x1BCA4, 'X'),
(0x1CF00, 'V'),
(0x1CF2E, 'X'),
(0x1CF30, 'V'),
(0x1CF47, 'X'),
(0x1CF50, 'V'),
(0x1CFC4, 'X'),
(0x1D000, 'V'),
(0x1D0F6, 'X'),
(0x1D100, 'V'),
(0x1D127, 'X'),
(0x1D129, 'V'),
(0x1D15E, 'M', '𝅗𝅥'),
(0x1D15F, 'M', '𝅘𝅥'),
(0x1D160, 'M', '𝅘𝅥𝅮'),
(0x1D161, 'M', '𝅘𝅥𝅯'),
(0x1D162, 'M', '𝅘𝅥𝅰'),
(0x1D163, 'M', '𝅘𝅥𝅱'),
(0x1D164, 'M', '𝅘𝅥𝅲'),
(0x1D165, 'V'),
(0x1D173, 'X'),
(0x1D17B, 'V'),
(0x1D1BB, 'M', '𝆹𝅥'),
(0x1D1BC, 'M', '𝆺𝅥'),
(0x1D1BD, 'M', '𝆹𝅥𝅮'),
(0x1D1BE, 'M', '𝆺𝅥𝅮'),
(0x1D1BF, 'M', '𝆹𝅥𝅯'),
(0x1D1C0, 'M', '𝆺𝅥𝅯'),
(0x1D1C1, 'V'),
(0x1D1EB, 'X'),
(0x1D200, 'V'),
(0x1D246, 'X'),
(0x1D2C0, 'V'),
(0x1D2D4, 'X'),
(0x1D2E0, 'V'),
(0x1D2F4, 'X'),
(0x1D300, 'V'),
(0x1D357, 'X'),
(0x1D360, 'V'),
(0x1D379, 'X'),
(0x1D400, 'M', 'a'),
(0x1D401, 'M', 'b'),
(0x1D402, 'M', 'c'),
(0x1D403, 'M', 'd'),
(0x1D404, 'M', 'e'),
(0x1D405, 'M', 'f'),
(0x1D406, 'M', 'g'),
(0x1D407, 'M', 'h'),
(0x1D408, 'M', 'i'),
(0x1D409, 'M', 'j'),
(0x1D40A, 'M', 'k'),
(0x1D40B, 'M', 'l'),
(0x1D40C, 'M', 'm'),
(0x1D40D, 'M', 'n'),
(0x1D40E, 'M', 'o'),
(0x1D40F, 'M', 'p'),
(0x1D410, 'M', 'q'),
(0x1D411, 'M', 'r'),
(0x1D412, 'M', 's'),
(0x1D413, 'M', 't'),
(0x1D414, 'M', 'u'),
(0x1D415, 'M', 'v'),
(0x1D416, 'M', 'w'),
(0x1D417, 'M', 'x'),
(0x1D418, 'M', 'y'),
(0x1D419, 'M', 'z'),
(0x1D41A, 'M', 'a'),
(0x1D41B, 'M', 'b'),
(0x1D41C, 'M', 'c'),
(0x1D41D, 'M', 'd'),
(0x1D41E, 'M', 'e'),
(0x1D41F, 'M', 'f'),
(0x1D420, 'M', 'g'),
]
def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D421, 'M', 'h'),
(0x1D422, 'M', 'i'),
(0x1D423, 'M', 'j'),
(0x1D424, 'M', 'k'),
(0x1D425, 'M', 'l'),
(0x1D426, 'M', 'm'),
(0x1D427, 'M', 'n'),
(0x1D428, 'M', 'o'),
(0x1D429, 'M', 'p'),
(0x1D42A, 'M', 'q'),
(0x1D42B, 'M', 'r'),
(0x1D42C, 'M', 's'),
(0x1D42D, 'M', 't'),
(0x1D42E, 'M', 'u'),
(0x1D42F, 'M', 'v'),
(0x1D430, 'M', 'w'),
(0x1D431, 'M', 'x'),
(0x1D432, 'M', 'y'),
(0x1D433, 'M', 'z'),
(0x1D434, 'M', 'a'),
(0x1D435, 'M', 'b'),
(0x1D436, 'M', 'c'),
(0x1D437, 'M', 'd'),
(0x1D438, 'M', 'e'),
(0x1D439, 'M', 'f'),
(0x1D43A, 'M', 'g'),
(0x1D43B, 'M', 'h'),
(0x1D43C, 'M', 'i'),
(0x1D43D, 'M', 'j'),
(0x1D43E, 'M', 'k'),
(0x1D43F, 'M', 'l'),
(0x1D440, 'M', 'm'),
(0x1D441, 'M', 'n'),
(0x1D442, 'M', 'o'),
(0x1D443, 'M', 'p'),
(0x1D444, 'M', 'q'),
(0x1D445, 'M', 'r'),
(0x1D446, 'M', 's'),
(0x1D447, 'M', 't'),
(0x1D448, 'M', 'u'),
(0x1D449, 'M', 'v'),
(0x1D44A, 'M', 'w'),
(0x1D44B, 'M', 'x'),
(0x1D44C, 'M', 'y'),
(0x1D44D, 'M', 'z'),
(0x1D44E, 'M', 'a'),
(0x1D44F, 'M', 'b'),
(0x1D450, 'M', 'c'),
(0x1D451, 'M', 'd'),
(0x1D452, 'M', 'e'),
(0x1D453, 'M', 'f'),
(0x1D454, 'M', 'g'),
(0x1D455, 'X'),
(0x1D456, 'M', 'i'),
(0x1D457, 'M', 'j'),
(0x1D458, 'M', 'k'),
(0x1D459, 'M', 'l'),
(0x1D45A, 'M', 'm'),
(0x1D45B, 'M', 'n'),
(0x1D45C, 'M', 'o'),
(0x1D45D, 'M', 'p'),
(0x1D45E, 'M', 'q'),
(0x1D45F, 'M', 'r'),
(0x1D460, 'M', 's'),
(0x1D461, 'M', 't'),
(0x1D462, 'M', 'u'),
(0x1D463, 'M', 'v'),
(0x1D464, 'M', 'w'),
(0x1D465, 'M', 'x'),
(0x1D466, 'M', 'y'),
(0x1D467, 'M', 'z'),
(0x1D468, 'M', 'a'),
(0x1D469, 'M', 'b'),
(0x1D46A, 'M', 'c'),
(0x1D46B, 'M', 'd'),
(0x1D46C, 'M', 'e'),
(0x1D46D, 'M', 'f'),
(0x1D46E, 'M', 'g'),
(0x1D46F, 'M', 'h'),
(0x1D470, 'M', 'i'),
(0x1D471, 'M', 'j'),
(0x1D472, 'M', 'k'),
(0x1D473, 'M', 'l'),
(0x1D474, 'M', 'm'),
(0x1D475, 'M', 'n'),
(0x1D476, 'M', 'o'),
(0x1D477, 'M', 'p'),
(0x1D478, 'M', 'q'),
(0x1D479, 'M', 'r'),
(0x1D47A, 'M', 's'),
(0x1D47B, 'M', 't'),
(0x1D47C, 'M', 'u'),
(0x1D47D, 'M', 'v'),
(0x1D47E, 'M', 'w'),
(0x1D47F, 'M', 'x'),
(0x1D480, 'M', 'y'),
(0x1D481, 'M', 'z'),
(0x1D482, 'M', 'a'),
(0x1D483, 'M', 'b'),
(0x1D484, 'M', 'c'),
]
def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D485, 'M', 'd'),
(0x1D486, 'M', 'e'),
(0x1D487, 'M', 'f'),
(0x1D488, 'M', 'g'),
(0x1D489, 'M', 'h'),
(0x1D48A, 'M', 'i'),
(0x1D48B, 'M', 'j'),
(0x1D48C, 'M', 'k'),
(0x1D48D, 'M', 'l'),
(0x1D48E, 'M', 'm'),
(0x1D48F, 'M', 'n'),
(0x1D490, 'M', 'o'),
(0x1D491, 'M', 'p'),
(0x1D492, 'M', 'q'),
(0x1D493, 'M', 'r'),
(0x1D494, 'M', 's'),
(0x1D495, 'M', 't'),
(0x1D496, 'M', 'u'),
(0x1D497, 'M', 'v'),
(0x1D498, 'M', 'w'),
(0x1D499, 'M', 'x'),
(0x1D49A, 'M', 'y'),
(0x1D49B, 'M', 'z'),
(0x1D49C, 'M', 'a'),
(0x1D49D, 'X'),
(0x1D49E, 'M', 'c'),
(0x1D49F, 'M', 'd'),
(0x1D4A0, 'X'),
(0x1D4A2, 'M', 'g'),
(0x1D4A3, 'X'),
(0x1D4A5, 'M', 'j'),
(0x1D4A6, 'M', 'k'),
(0x1D4A7, 'X'),
(0x1D4A9, 'M', 'n'),
(0x1D4AA, 'M', 'o'),
(0x1D4AB, 'M', 'p'),
(0x1D4AC, 'M', 'q'),
(0x1D4AD, 'X'),
(0x1D4AE, 'M', 's'),
(0x1D4AF, 'M', 't'),
(0x1D4B0, 'M', 'u'),
(0x1D4B1, 'M', 'v'),
(0x1D4B2, 'M', 'w'),
(0x1D4B3, 'M', 'x'),
(0x1D4B4, 'M', 'y'),
(0x1D4B5, 'M', 'z'),
(0x1D4B6, 'M', 'a'),
(0x1D4B7, 'M', 'b'),
(0x1D4B8, 'M', 'c'),
(0x1D4B9, 'M', 'd'),
(0x1D4BA, 'X'),
(0x1D4BB, 'M', 'f'),
(0x1D4BC, 'X'),
(0x1D4BD, 'M', 'h'),
(0x1D4BE, 'M', 'i'),
(0x1D4BF, 'M', 'j'),
(0x1D4C0, 'M', 'k'),
(0x1D4C1, 'M', 'l'),
(0x1D4C2, 'M', 'm'),
(0x1D4C3, 'M', 'n'),
(0x1D4C4, 'X'),
(0x1D4C5, 'M', 'p'),
(0x1D4C6, 'M', 'q'),
(0x1D4C7, 'M', 'r'),
(0x1D4C8, 'M', 's'),
(0x1D4C9, 'M', 't'),
(0x1D4CA, 'M', 'u'),
(0x1D4CB, 'M', 'v'),
(0x1D4CC, 'M', 'w'),
(0x1D4CD, 'M', 'x'),
(0x1D4CE, 'M', 'y'),
(0x1D4CF, 'M', 'z'),
(0x1D4D0, 'M', 'a'),
(0x1D4D1, 'M', 'b'),
(0x1D4D2, 'M', 'c'),
(0x1D4D3, 'M', 'd'),
(0x1D4D4, 'M', 'e'),
(0x1D4D5, 'M', 'f'),
(0x1D4D6, 'M', 'g'),
(0x1D4D7, 'M', 'h'),
(0x1D4D8, 'M', 'i'),
(0x1D4D9, 'M', 'j'),
(0x1D4DA, 'M', 'k'),
(0x1D4DB, 'M', 'l'),
(0x1D4DC, 'M', 'm'),
(0x1D4DD, 'M', 'n'),
(0x1D4DE, 'M', 'o'),
(0x1D4DF, 'M', 'p'),
(0x1D4E0, 'M', 'q'),
(0x1D4E1, 'M', 'r'),
(0x1D4E2, 'M', 's'),
(0x1D4E3, 'M', 't'),
(0x1D4E4, 'M', 'u'),
(0x1D4E5, 'M', 'v'),
(0x1D4E6, 'M', 'w'),
(0x1D4E7, 'M', 'x'),
(0x1D4E8, 'M', 'y'),
(0x1D4E9, 'M', 'z'),
(0x1D4EA, 'M', 'a'),
(0x1D4EB, 'M', 'b'),
]
def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D4EC, 'M', 'c'),
(0x1D4ED, 'M', 'd'),
(0x1D4EE, 'M', 'e'),
(0x1D4EF, 'M', 'f'),
(0x1D4F0, 'M', 'g'),
(0x1D4F1, 'M', 'h'),
(0x1D4F2, 'M', 'i'),
(0x1D4F3, 'M', 'j'),
(0x1D4F4, 'M', 'k'),
(0x1D4F5, 'M', 'l'),
(0x1D4F6, 'M', 'm'),
(0x1D4F7, 'M', 'n'),
(0x1D4F8, 'M', 'o'),
(0x1D4F9, 'M', 'p'),
(0x1D4FA, 'M', 'q'),
(0x1D4FB, 'M', 'r'),
(0x1D4FC, 'M', 's'),
(0x1D4FD, 'M', 't'),
(0x1D4FE, 'M', 'u'),
(0x1D4FF, 'M', 'v'),
(0x1D500, 'M', 'w'),
(0x1D501, 'M', 'x'),
(0x1D502, 'M', 'y'),
(0x1D503, 'M', 'z'),
(0x1D504, 'M', 'a'),
(0x1D505, 'M', 'b'),
(0x1D506, 'X'),
(0x1D507, 'M', 'd'),
(0x1D508, 'M', 'e'),
(0x1D509, 'M', 'f'),
(0x1D50A, 'M', 'g'),
(0x1D50B, 'X'),
(0x1D50D, 'M', 'j'),
(0x1D50E, 'M', 'k'),
(0x1D50F, 'M', 'l'),
(0x1D510, 'M', 'm'),
(0x1D511, 'M', 'n'),
(0x1D512, 'M', 'o'),
(0x1D513, 'M', 'p'),
(0x1D514, 'M', 'q'),
(0x1D515, 'X'),
(0x1D516, 'M', 's'),
(0x1D517, 'M', 't'),
(0x1D518, 'M', 'u'),
(0x1D519, 'M', 'v'),
(0x1D51A, 'M', 'w'),
(0x1D51B, 'M', 'x'),
(0x1D51C, 'M', 'y'),
(0x1D51D, 'X'),
(0x1D51E, 'M', 'a'),
(0x1D51F, 'M', 'b'),
(0x1D520, 'M', 'c'),
(0x1D521, 'M', 'd'),
(0x1D522, 'M', 'e'),
(0x1D523, 'M', 'f'),
(0x1D524, 'M', 'g'),
(0x1D525, 'M', 'h'),
(0x1D526, 'M', 'i'),
(0x1D527, 'M', 'j'),
(0x1D528, 'M', 'k'),
(0x1D529, 'M', 'l'),
(0x1D52A, 'M', 'm'),
(0x1D52B, 'M', 'n'),
(0x1D52C, 'M', 'o'),
(0x1D52D, 'M', 'p'),
(0x1D52E, 'M', 'q'),
(0x1D52F, 'M', 'r'),
(0x1D530, 'M', 's'),
(0x1D531, 'M', 't'),
(0x1D532, 'M', 'u'),
(0x1D533, 'M', 'v'),
(0x1D534, 'M', 'w'),
(0x1D535, 'M', 'x'),
(0x1D536, 'M', 'y'),
(0x1D537, 'M', 'z'),
(0x1D538, 'M', 'a'),
(0x1D539, 'M', 'b'),
(0x1D53A, 'X'),
(0x1D53B, 'M', 'd'),
(0x1D53C, 'M', 'e'),
(0x1D53D, 'M', 'f'),
(0x1D53E, 'M', 'g'),
(0x1D53F, 'X'),
(0x1D540, 'M', 'i'),
(0x1D541, 'M', 'j'),
(0x1D542, 'M', 'k'),
(0x1D543, 'M', 'l'),
(0x1D544, 'M', 'm'),
(0x1D545, 'X'),
(0x1D546, 'M', 'o'),
(0x1D547, 'X'),
(0x1D54A, 'M', 's'),
(0x1D54B, 'M', 't'),
(0x1D54C, 'M', 'u'),
(0x1D54D, 'M', 'v'),
(0x1D54E, 'M', 'w'),
(0x1D54F, 'M', 'x'),
(0x1D550, 'M', 'y'),
(0x1D551, 'X'),
(0x1D552, 'M', 'a'),
]
def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D553, 'M', 'b'),
(0x1D554, 'M', 'c'),
(0x1D555, 'M', 'd'),
(0x1D556, 'M', 'e'),
(0x1D557, 'M', 'f'),
(0x1D558, 'M', 'g'),
(0x1D559, 'M', 'h'),
(0x1D55A, 'M', 'i'),
(0x1D55B, 'M', 'j'),
(0x1D55C, 'M', 'k'),
(0x1D55D, 'M', 'l'),
(0x1D55E, 'M', 'm'),
(0x1D55F, 'M', 'n'),
(0x1D560, 'M', 'o'),
(0x1D561, 'M', 'p'),
(0x1D562, 'M', 'q'),
(0x1D563, 'M', 'r'),
(0x1D564, 'M', 's'),
(0x1D565, 'M', 't'),
(0x1D566, 'M', 'u'),
(0x1D567, 'M', 'v'),
(0x1D568, 'M', 'w'),
(0x1D569, 'M', 'x'),
(0x1D56A, 'M', 'y'),
(0x1D56B, 'M', 'z'),
(0x1D56C, 'M', 'a'),
(0x1D56D, 'M', 'b'),
(0x1D56E, 'M', 'c'),
(0x1D56F, 'M', 'd'),
(0x1D570, 'M', 'e'),
(0x1D571, 'M', 'f'),
(0x1D572, 'M', 'g'),
(0x1D573, 'M', 'h'),
(0x1D574, 'M', 'i'),
(0x1D575, 'M', 'j'),
(0x1D576, 'M', 'k'),
(0x1D577, 'M', 'l'),
(0x1D578, 'M', 'm'),
(0x1D579, 'M', 'n'),
(0x1D57A, 'M', 'o'),
(0x1D57B, 'M', 'p'),
(0x1D57C, 'M', 'q'),
(0x1D57D, 'M', 'r'),
(0x1D57E, 'M', 's'),
(0x1D57F, 'M', 't'),
(0x1D580, 'M', 'u'),
(0x1D581, 'M', 'v'),
(0x1D582, 'M', 'w'),
(0x1D583, 'M', 'x'),
(0x1D584, 'M', 'y'),
(0x1D585, 'M', 'z'),
(0x1D586, 'M', 'a'),
(0x1D587, 'M', 'b'),
(0x1D588, 'M', 'c'),
(0x1D589, 'M', 'd'),
(0x1D58A, 'M', 'e'),
(0x1D58B, 'M', 'f'),
(0x1D58C, 'M', 'g'),
(0x1D58D, 'M', 'h'),
(0x1D58E, 'M', 'i'),
(0x1D58F, 'M', 'j'),
(0x1D590, 'M', 'k'),
(0x1D591, 'M', 'l'),
(0x1D592, 'M', 'm'),
(0x1D593, 'M', 'n'),
(0x1D594, 'M', 'o'),
(0x1D595, 'M', 'p'),
(0x1D596, 'M', 'q'),
(0x1D597, 'M', 'r'),
(0x1D598, 'M', 's'),
(0x1D599, 'M', 't'),
(0x1D59A, 'M', 'u'),
(0x1D59B, 'M', 'v'),
(0x1D59C, 'M', 'w'),
(0x1D59D, 'M', 'x'),
(0x1D59E, 'M', 'y'),
(0x1D59F, 'M', 'z'),
(0x1D5A0, 'M', 'a'),
(0x1D5A1, 'M', 'b'),
(0x1D5A2, 'M', 'c'),
(0x1D5A3, 'M', 'd'),
(0x1D5A4, 'M', 'e'),
(0x1D5A5, 'M', 'f'),
(0x1D5A6, 'M', 'g'),
(0x1D5A7, 'M', 'h'),
(0x1D5A8, 'M', 'i'),
(0x1D5A9, 'M', 'j'),
(0x1D5AA, 'M', 'k'),
(0x1D5AB, 'M', 'l'),
(0x1D5AC, 'M', 'm'),
(0x1D5AD, 'M', 'n'),
(0x1D5AE, 'M', 'o'),
(0x1D5AF, 'M', 'p'),
(0x1D5B0, 'M', 'q'),
(0x1D5B1, 'M', 'r'),
(0x1D5B2, 'M', 's'),
(0x1D5B3, 'M', 't'),
(0x1D5B4, 'M', 'u'),
(0x1D5B5, 'M', 'v'),
(0x1D5B6, 'M', 'w'),
]
def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D5B7, 'M', 'x'),
(0x1D5B8, 'M', 'y'),
(0x1D5B9, 'M', 'z'),
(0x1D5BA, 'M', 'a'),
(0x1D5BB, 'M', 'b'),
(0x1D5BC, 'M', 'c'),
(0x1D5BD, 'M', 'd'),
(0x1D5BE, 'M', 'e'),
(0x1D5BF, 'M', 'f'),
(0x1D5C0, 'M', 'g'),
(0x1D5C1, 'M', 'h'),
(0x1D5C2, 'M', 'i'),
(0x1D5C3, 'M', 'j'),
(0x1D5C4, 'M', 'k'),
(0x1D5C5, 'M', 'l'),
(0x1D5C6, 'M', 'm'),
(0x1D5C7, 'M', 'n'),
(0x1D5C8, 'M', 'o'),
(0x1D5C9, 'M', 'p'),
(0x1D5CA, 'M', 'q'),
(0x1D5CB, 'M', 'r'),
(0x1D5CC, 'M', 's'),
(0x1D5CD, 'M', 't'),
(0x1D5CE, 'M', 'u'),
(0x1D5CF, 'M', 'v'),
(0x1D5D0, 'M', 'w'),
(0x1D5D1, 'M', 'x'),
(0x1D5D2, 'M', 'y'),
(0x1D5D3, 'M', 'z'),
(0x1D5D4, 'M', 'a'),
(0x1D5D5, 'M', 'b'),
(0x1D5D6, 'M', 'c'),
(0x1D5D7, 'M', 'd'),
(0x1D5D8, 'M', 'e'),
(0x1D5D9, 'M', 'f'),
(0x1D5DA, 'M', 'g'),
(0x1D5DB, 'M', 'h'),
(0x1D5DC, 'M', 'i'),
(0x1D5DD, 'M', 'j'),
(0x1D5DE, 'M', 'k'),
(0x1D5DF, 'M', 'l'),
(0x1D5E0, 'M', 'm'),
(0x1D5E1, 'M', 'n'),
(0x1D5E2, 'M', 'o'),
(0x1D5E3, 'M', 'p'),
(0x1D5E4, 'M', 'q'),
(0x1D5E5, 'M', 'r'),
(0x1D5E6, 'M', 's'),
(0x1D5E7, 'M', 't'),
(0x1D5E8, 'M', 'u'),
(0x1D5E9, 'M', 'v'),
(0x1D5EA, 'M', 'w'),
(0x1D5EB, 'M', 'x'),
(0x1D5EC, 'M', 'y'),
(0x1D5ED, 'M', 'z'),
(0x1D5EE, 'M', 'a'),
(0x1D5EF, 'M', 'b'),
(0x1D5F0, 'M', 'c'),
(0x1D5F1, 'M', 'd'),
(0x1D5F2, 'M', 'e'),
(0x1D5F3, 'M', 'f'),
(0x1D5F4, 'M', 'g'),
(0x1D5F5, 'M', 'h'),
(0x1D5F6, 'M', 'i'),
(0x1D5F7, 'M', 'j'),
(0x1D5F8, 'M', 'k'),
(0x1D5F9, 'M', 'l'),
(0x1D5FA, 'M', 'm'),
(0x1D5FB, 'M', 'n'),
(0x1D5FC, 'M', 'o'),
(0x1D5FD, 'M', 'p'),
(0x1D5FE, 'M', 'q'),
(0x1D5FF, 'M', 'r'),
(0x1D600, 'M', 's'),
(0x1D601, 'M', 't'),
(0x1D602, 'M', 'u'),
(0x1D603, 'M', 'v'),
(0x1D604, 'M', 'w'),
(0x1D605, 'M', 'x'),
(0x1D606, 'M', 'y'),
(0x1D607, 'M', 'z'),
(0x1D608, 'M', 'a'),
(0x1D609, 'M', 'b'),
(0x1D60A, 'M', 'c'),
(0x1D60B, 'M', 'd'),
(0x1D60C, 'M', 'e'),
(0x1D60D, 'M', 'f'),
(0x1D60E, 'M', 'g'),
(0x1D60F, 'M', 'h'),
(0x1D610, 'M', 'i'),
(0x1D611, 'M', 'j'),
(0x1D612, 'M', 'k'),
(0x1D613, 'M', 'l'),
(0x1D614, 'M', 'm'),
(0x1D615, 'M', 'n'),
(0x1D616, 'M', 'o'),
(0x1D617, 'M', 'p'),
(0x1D618, 'M', 'q'),
(0x1D619, 'M', 'r'),
(0x1D61A, 'M', 's'),
]
def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D61B, 'M', 't'),
(0x1D61C, 'M', 'u'),
(0x1D61D, 'M', 'v'),
(0x1D61E, 'M', 'w'),
(0x1D61F, 'M', 'x'),
(0x1D620, 'M', 'y'),
(0x1D621, 'M', 'z'),
(0x1D622, 'M', 'a'),
(0x1D623, 'M', 'b'),
(0x1D624, 'M', 'c'),
(0x1D625, 'M', 'd'),
(0x1D626, 'M', 'e'),
(0x1D627, 'M', 'f'),
(0x1D628, 'M', 'g'),
(0x1D629, 'M', 'h'),
(0x1D62A, 'M', 'i'),
(0x1D62B, 'M', 'j'),
(0x1D62C, 'M', 'k'),
(0x1D62D, 'M', 'l'),
(0x1D62E, 'M', 'm'),
(0x1D62F, 'M', 'n'),
(0x1D630, 'M', 'o'),
(0x1D631, 'M', 'p'),
(0x1D632, 'M', 'q'),
(0x1D633, 'M', 'r'),
(0x1D634, 'M', 's'),
(0x1D635, 'M', 't'),
(0x1D636, 'M', 'u'),
(0x1D637, 'M', 'v'),
(0x1D638, 'M', 'w'),
(0x1D639, 'M', 'x'),
(0x1D63A, 'M', 'y'),
(0x1D63B, 'M', 'z'),
(0x1D63C, 'M', 'a'),
(0x1D63D, 'M', 'b'),
(0x1D63E, 'M', 'c'),
(0x1D63F, 'M', 'd'),
(0x1D640, 'M', 'e'),
(0x1D641, 'M', 'f'),
(0x1D642, 'M', 'g'),
(0x1D643, 'M', 'h'),
(0x1D644, 'M', 'i'),
(0x1D645, 'M', 'j'),
(0x1D646, 'M', 'k'),
(0x1D647, 'M', 'l'),
(0x1D648, 'M', 'm'),
(0x1D649, 'M', 'n'),
(0x1D64A, 'M', 'o'),
(0x1D64B, 'M', 'p'),
(0x1D64C, 'M', 'q'),
(0x1D64D, 'M', 'r'),
(0x1D64E, 'M', 's'),
(0x1D64F, 'M', 't'),
(0x1D650, 'M', 'u'),
(0x1D651, 'M', 'v'),
(0x1D652, 'M', 'w'),
(0x1D653, 'M', 'x'),
(0x1D654, 'M', 'y'),
(0x1D655, 'M', 'z'),
(0x1D656, 'M', 'a'),
(0x1D657, 'M', 'b'),
(0x1D658, 'M', 'c'),
(0x1D659, 'M', 'd'),
(0x1D65A, 'M', 'e'),
(0x1D65B, 'M', 'f'),
(0x1D65C, 'M', 'g'),
(0x1D65D, 'M', 'h'),
(0x1D65E, 'M', 'i'),
(0x1D65F, 'M', 'j'),
(0x1D660, 'M', 'k'),
(0x1D661, 'M', 'l'),
(0x1D662, 'M', 'm'),
(0x1D663, 'M', 'n'),
(0x1D664, 'M', 'o'),
(0x1D665, 'M', 'p'),
(0x1D666, 'M', 'q'),
(0x1D667, 'M', 'r'),
(0x1D668, 'M', 's'),
(0x1D669, 'M', 't'),
(0x1D66A, 'M', 'u'),
(0x1D66B, 'M', 'v'),
(0x1D66C, 'M', 'w'),
(0x1D66D, 'M', 'x'),
(0x1D66E, 'M', 'y'),
(0x1D66F, 'M', 'z'),
(0x1D670, 'M', 'a'),
(0x1D671, 'M', 'b'),
(0x1D672, 'M', 'c'),
(0x1D673, 'M', 'd'),
(0x1D674, 'M', 'e'),
(0x1D675, 'M', 'f'),
(0x1D676, 'M', 'g'),
(0x1D677, 'M', 'h'),
(0x1D678, 'M', 'i'),
(0x1D679, 'M', 'j'),
(0x1D67A, 'M', 'k'),
(0x1D67B, 'M', 'l'),
(0x1D67C, 'M', 'm'),
(0x1D67D, 'M', 'n'),
(0x1D67E, 'M', 'o'),
]
def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D67F, 'M', 'p'),
(0x1D680, 'M', 'q'),
(0x1D681, 'M', 'r'),
(0x1D682, 'M', 's'),
(0x1D683, 'M', 't'),
(0x1D684, 'M', 'u'),
(0x1D685, 'M', 'v'),
(0x1D686, 'M', 'w'),
(0x1D687, 'M', 'x'),
(0x1D688, 'M', 'y'),
(0x1D689, 'M', 'z'),
(0x1D68A, 'M', 'a'),
(0x1D68B, 'M', 'b'),
(0x1D68C, 'M', 'c'),
(0x1D68D, 'M', 'd'),
(0x1D68E, 'M', 'e'),
(0x1D68F, 'M', 'f'),
(0x1D690, 'M', 'g'),
(0x1D691, 'M', 'h'),
(0x1D692, 'M', 'i'),
(0x1D693, 'M', 'j'),
(0x1D694, 'M', 'k'),
(0x1D695, 'M', 'l'),
(0x1D696, 'M', 'm'),
(0x1D697, 'M', 'n'),
(0x1D698, 'M', 'o'),
(0x1D699, 'M', 'p'),
(0x1D69A, 'M', 'q'),
(0x1D69B, 'M', 'r'),
(0x1D69C, 'M', 's'),
(0x1D69D, 'M', 't'),
(0x1D69E, 'M', 'u'),
(0x1D69F, 'M', 'v'),
(0x1D6A0, 'M', 'w'),
(0x1D6A1, 'M', 'x'),
(0x1D6A2, 'M', 'y'),
(0x1D6A3, 'M', 'z'),
(0x1D6A4, 'M', 'ı'),
(0x1D6A5, 'M', 'ȷ'),
(0x1D6A6, 'X'),
(0x1D6A8, 'M', 'α'),
(0x1D6A9, 'M', 'β'),
(0x1D6AA, 'M', 'γ'),
(0x1D6AB, 'M', 'δ'),
(0x1D6AC, 'M', 'ε'),
(0x1D6AD, 'M', 'ζ'),
(0x1D6AE, 'M', 'η'),
(0x1D6AF, 'M', 'θ'),
(0x1D6B0, 'M', 'ι'),
(0x1D6B1, 'M', 'κ'),
(0x1D6B2, 'M', 'λ'),
(0x1D6B3, 'M', 'μ'),
(0x1D6B4, 'M', 'ν'),
(0x1D6B5, 'M', 'ξ'),
(0x1D6B6, 'M', 'ο'),
(0x1D6B7, 'M', 'π'),
(0x1D6B8, 'M', 'ρ'),
(0x1D6B9, 'M', 'θ'),
(0x1D6BA, 'M', 'σ'),
(0x1D6BB, 'M', 'τ'),
(0x1D6BC, 'M', 'υ'),
(0x1D6BD, 'M', 'φ'),
(0x1D6BE, 'M', 'χ'),
(0x1D6BF, 'M', 'ψ'),
(0x1D6C0, 'M', 'ω'),
(0x1D6C1, 'M', '∇'),
(0x1D6C2, 'M', 'α'),
(0x1D6C3, 'M', 'β'),
(0x1D6C4, 'M', 'γ'),
(0x1D6C5, 'M', 'δ'),
(0x1D6C6, 'M', 'ε'),
(0x1D6C7, 'M', 'ζ'),
(0x1D6C8, 'M', 'η'),
(0x1D6C9, 'M', 'θ'),
(0x1D6CA, 'M', 'ι'),
(0x1D6CB, 'M', 'κ'),
(0x1D6CC, 'M', 'λ'),
(0x1D6CD, 'M', 'μ'),
(0x1D6CE, 'M', 'ν'),
(0x1D6CF, 'M', 'ξ'),
(0x1D6D0, 'M', 'ο'),
(0x1D6D1, 'M', 'π'),
(0x1D6D2, 'M', 'ρ'),
(0x1D6D3, 'M', 'σ'),
(0x1D6D5, 'M', 'τ'),
(0x1D6D6, 'M', 'υ'),
(0x1D6D7, 'M', 'φ'),
(0x1D6D8, 'M', 'χ'),
(0x1D6D9, 'M', 'ψ'),
(0x1D6DA, 'M', 'ω'),
(0x1D6DB, 'M', '∂'),
(0x1D6DC, 'M', 'ε'),
(0x1D6DD, 'M', 'θ'),
(0x1D6DE, 'M', 'κ'),
(0x1D6DF, 'M', 'φ'),
(0x1D6E0, 'M', 'ρ'),
(0x1D6E1, 'M', 'π'),
(0x1D6E2, 'M', 'α'),
(0x1D6E3, 'M', 'β'),
(0x1D6E4, 'M', 'γ'),
]
def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D6E5, 'M', 'δ'),
(0x1D6E6, 'M', 'ε'),
(0x1D6E7, 'M', 'ζ'),
(0x1D6E8, 'M', 'η'),
(0x1D6E9, 'M', 'θ'),
(0x1D6EA, 'M', 'ι'),
(0x1D6EB, 'M', 'κ'),
(0x1D6EC, 'M', 'λ'),
(0x1D6ED, 'M', 'μ'),
(0x1D6EE, 'M', 'ν'),
(0x1D6EF, 'M', 'ξ'),
(0x1D6F0, 'M', 'ο'),
(0x1D6F1, 'M', 'π'),
(0x1D6F2, 'M', 'ρ'),
(0x1D6F3, 'M', 'θ'),
(0x1D6F4, 'M', 'σ'),
(0x1D6F5, 'M', 'τ'),
(0x1D6F6, 'M', 'υ'),
(0x1D6F7, 'M', 'φ'),
(0x1D6F8, 'M', 'χ'),
(0x1D6F9, 'M', 'ψ'),
(0x1D6FA, 'M', 'ω'),
(0x1D6FB, 'M', '∇'),
(0x1D6FC, 'M', 'α'),
(0x1D6FD, 'M', 'β'),
(0x1D6FE, 'M', 'γ'),
(0x1D6FF, 'M', 'δ'),
(0x1D700, 'M', 'ε'),
(0x1D701, 'M', 'ζ'),
(0x1D702, 'M', 'η'),
(0x1D703, 'M', 'θ'),
(0x1D704, 'M', 'ι'),
(0x1D705, 'M', 'κ'),
(0x1D706, 'M', 'λ'),
(0x1D707, 'M', 'μ'),
(0x1D708, 'M', 'ν'),
(0x1D709, 'M', 'ξ'),
(0x1D70A, 'M', 'ο'),
(0x1D70B, 'M', 'π'),
(0x1D70C, 'M', 'ρ'),
(0x1D70D, 'M', 'σ'),
(0x1D70F, 'M', 'τ'),
(0x1D710, 'M', 'υ'),
(0x1D711, 'M', 'φ'),
(0x1D712, 'M', 'χ'),
(0x1D713, 'M', 'ψ'),
(0x1D714, 'M', 'ω'),
(0x1D715, 'M', '∂'),
(0x1D716, 'M', 'ε'),
(0x1D717, 'M', 'θ'),
(0x1D718, 'M', 'κ'),
(0x1D719, 'M', 'φ'),
(0x1D71A, 'M', 'ρ'),
(0x1D71B, 'M', 'π'),
(0x1D71C, 'M', 'α'),
(0x1D71D, 'M', 'β'),
(0x1D71E, 'M', 'γ'),
(0x1D71F, 'M', 'δ'),
(0x1D720, 'M', 'ε'),
(0x1D721, 'M', 'ζ'),
(0x1D722, 'M', 'η'),
(0x1D723, 'M', 'θ'),
(0x1D724, 'M', 'ι'),
(0x1D725, 'M', 'κ'),
(0x1D726, 'M', 'λ'),
(0x1D727, 'M', 'μ'),
(0x1D728, 'M', 'ν'),
(0x1D729, 'M', 'ξ'),
(0x1D72A, 'M', 'ο'),
(0x1D72B, 'M', 'π'),
(0x1D72C, 'M', 'ρ'),
(0x1D72D, 'M', 'θ'),
(0x1D72E, 'M', 'σ'),
(0x1D72F, 'M', 'τ'),
(0x1D730, 'M', 'υ'),
(0x1D731, 'M', 'φ'),
(0x1D732, 'M', 'χ'),
(0x1D733, 'M', 'ψ'),
(0x1D734, 'M', 'ω'),
(0x1D735, 'M', '∇'),
(0x1D736, 'M', 'α'),
(0x1D737, 'M', 'β'),
(0x1D738, 'M', 'γ'),
(0x1D739, 'M', 'δ'),
(0x1D73A, 'M', 'ε'),
(0x1D73B, 'M', 'ζ'),
(0x1D73C, 'M', 'η'),
(0x1D73D, 'M', 'θ'),
(0x1D73E, 'M', 'ι'),
(0x1D73F, 'M', 'κ'),
(0x1D740, 'M', 'λ'),
(0x1D741, 'M', 'μ'),
(0x1D742, 'M', 'ν'),
(0x1D743, 'M', 'ξ'),
(0x1D744, 'M', 'ο'),
(0x1D745, 'M', 'π'),
(0x1D746, 'M', 'ρ'),
(0x1D747, 'M', 'σ'),
(0x1D749, 'M', 'τ'),
(0x1D74A, 'M', 'υ'),
]
def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D74B, 'M', 'φ'),
(0x1D74C, 'M', 'χ'),
(0x1D74D, 'M', 'ψ'),
(0x1D74E, 'M', 'ω'),
(0x1D74F, 'M', '∂'),
(0x1D750, 'M', 'ε'),
(0x1D751, 'M', 'θ'),
(0x1D752, 'M', 'κ'),
(0x1D753, 'M', 'φ'),
(0x1D754, 'M', 'ρ'),
(0x1D755, 'M', 'π'),
(0x1D756, 'M', 'α'),
(0x1D757, 'M', 'β'),
(0x1D758, 'M', 'γ'),
(0x1D759, 'M', 'δ'),
(0x1D75A, 'M', 'ε'),
(0x1D75B, 'M', 'ζ'),
(0x1D75C, 'M', 'η'),
(0x1D75D, 'M', 'θ'),
(0x1D75E, 'M', 'ι'),
(0x1D75F, 'M', 'κ'),
(0x1D760, 'M', 'λ'),
(0x1D761, 'M', 'μ'),
(0x1D762, 'M', 'ν'),
(0x1D763, 'M', 'ξ'),
(0x1D764, 'M', 'ο'),
(0x1D765, 'M', 'π'),
(0x1D766, 'M', 'ρ'),
(0x1D767, 'M', 'θ'),
(0x1D768, 'M', 'σ'),
(0x1D769, 'M', 'τ'),
(0x1D76A, 'M', 'υ'),
(0x1D76B, 'M', 'φ'),
(0x1D76C, 'M', 'χ'),
(0x1D76D, 'M', 'ψ'),
(0x1D76E, 'M', 'ω'),
(0x1D76F, 'M', '∇'),
(0x1D770, 'M', 'α'),
(0x1D771, 'M', 'β'),
(0x1D772, 'M', 'γ'),
(0x1D773, 'M', 'δ'),
(0x1D774, 'M', 'ε'),
(0x1D775, 'M', 'ζ'),
(0x1D776, 'M', 'η'),
(0x1D777, 'M', 'θ'),
(0x1D778, 'M', 'ι'),
(0x1D779, 'M', 'κ'),
(0x1D77A, 'M', 'λ'),
(0x1D77B, 'M', 'μ'),
(0x1D77C, 'M', 'ν'),
(0x1D77D, 'M', 'ξ'),
(0x1D77E, 'M', 'ο'),
(0x1D77F, 'M', 'π'),
(0x1D780, 'M', 'ρ'),
(0x1D781, 'M', 'σ'),
(0x1D783, 'M', 'τ'),
(0x1D784, 'M', 'υ'),
(0x1D785, 'M', 'φ'),
(0x1D786, 'M', 'χ'),
(0x1D787, 'M', 'ψ'),
(0x1D788, 'M', 'ω'),
(0x1D789, 'M', '∂'),
(0x1D78A, 'M', 'ε'),
(0x1D78B, 'M', 'θ'),
(0x1D78C, 'M', 'κ'),
(0x1D78D, 'M', 'φ'),
(0x1D78E, 'M', 'ρ'),
(0x1D78F, 'M', 'π'),
(0x1D790, 'M', 'α'),
(0x1D791, 'M', 'β'),
(0x1D792, 'M', 'γ'),
(0x1D793, 'M', 'δ'),
(0x1D794, 'M', 'ε'),
(0x1D795, 'M', 'ζ'),
(0x1D796, 'M', 'η'),
(0x1D797, 'M', 'θ'),
(0x1D798, 'M', 'ι'),
(0x1D799, 'M', 'κ'),
(0x1D79A, 'M', 'λ'),
(0x1D79B, 'M', 'μ'),
(0x1D79C, 'M', 'ν'),
(0x1D79D, 'M', 'ξ'),
(0x1D79E, 'M', 'ο'),
(0x1D79F, 'M', 'π'),
(0x1D7A0, 'M', 'ρ'),
(0x1D7A1, 'M', 'θ'),
(0x1D7A2, 'M', 'σ'),
(0x1D7A3, 'M', 'τ'),
(0x1D7A4, 'M', 'υ'),
(0x1D7A5, 'M', 'φ'),
(0x1D7A6, 'M', 'χ'),
(0x1D7A7, 'M', 'ψ'),
(0x1D7A8, 'M', 'ω'),
(0x1D7A9, 'M', '∇'),
(0x1D7AA, 'M', 'α'),
(0x1D7AB, 'M', 'β'),
(0x1D7AC, 'M', 'γ'),
(0x1D7AD, 'M', 'δ'),
(0x1D7AE, 'M', 'ε'),
(0x1D7AF, 'M', 'ζ'),
]
def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1D7B0, 'M', 'η'),
(0x1D7B1, 'M', 'θ'),
(0x1D7B2, 'M', 'ι'),
(0x1D7B3, 'M', 'κ'),
(0x1D7B4, 'M', 'λ'),
(0x1D7B5, 'M', 'μ'),
(0x1D7B6, 'M', 'ν'),
(0x1D7B7, 'M', 'ξ'),
(0x1D7B8, 'M', 'ο'),
(0x1D7B9, 'M', 'π'),
(0x1D7BA, 'M', 'ρ'),
(0x1D7BB, 'M', 'σ'),
(0x1D7BD, 'M', 'τ'),
(0x1D7BE, 'M', 'υ'),
(0x1D7BF, 'M', 'φ'),
(0x1D7C0, 'M', 'χ'),
(0x1D7C1, 'M', 'ψ'),
(0x1D7C2, 'M', 'ω'),
(0x1D7C3, 'M', '∂'),
(0x1D7C4, 'M', 'ε'),
(0x1D7C5, 'M', 'θ'),
(0x1D7C6, 'M', 'κ'),
(0x1D7C7, 'M', 'φ'),
(0x1D7C8, 'M', 'ρ'),
(0x1D7C9, 'M', 'π'),
(0x1D7CA, 'M', 'ϝ'),
(0x1D7CC, 'X'),
(0x1D7CE, 'M', '0'),
(0x1D7CF, 'M', '1'),
(0x1D7D0, 'M', '2'),
(0x1D7D1, 'M', '3'),
(0x1D7D2, 'M', '4'),
(0x1D7D3, 'M', '5'),
(0x1D7D4, 'M', '6'),
(0x1D7D5, 'M', '7'),
(0x1D7D6, 'M', '8'),
(0x1D7D7, 'M', '9'),
(0x1D7D8, 'M', '0'),
(0x1D7D9, 'M', '1'),
(0x1D7DA, 'M', '2'),
(0x1D7DB, 'M', '3'),
(0x1D7DC, 'M', '4'),
(0x1D7DD, 'M', '5'),
(0x1D7DE, 'M', '6'),
(0x1D7DF, 'M', '7'),
(0x1D7E0, 'M', '8'),
(0x1D7E1, 'M', '9'),
(0x1D7E2, 'M', '0'),
(0x1D7E3, 'M', '1'),
(0x1D7E4, 'M', '2'),
(0x1D7E5, 'M', '3'),
(0x1D7E6, 'M', '4'),
(0x1D7E7, 'M', '5'),
(0x1D7E8, 'M', '6'),
(0x1D7E9, 'M', '7'),
(0x1D7EA, 'M', '8'),
(0x1D7EB, 'M', '9'),
(0x1D7EC, 'M', '0'),
(0x1D7ED, 'M', '1'),
(0x1D7EE, 'M', '2'),
(0x1D7EF, 'M', '3'),
(0x1D7F0, 'M', '4'),
(0x1D7F1, 'M', '5'),
(0x1D7F2, 'M', '6'),
(0x1D7F3, 'M', '7'),
(0x1D7F4, 'M', '8'),
(0x1D7F5, 'M', '9'),
(0x1D7F6, 'M', '0'),
(0x1D7F7, 'M', '1'),
(0x1D7F8, 'M', '2'),
(0x1D7F9, 'M', '3'),
(0x1D7FA, 'M', '4'),
(0x1D7FB, 'M', '5'),
(0x1D7FC, 'M', '6'),
(0x1D7FD, 'M', '7'),
(0x1D7FE, 'M', '8'),
(0x1D7FF, 'M', '9'),
(0x1D800, 'V'),
(0x1DA8C, 'X'),
(0x1DA9B, 'V'),
(0x1DAA0, 'X'),
(0x1DAA1, 'V'),
(0x1DAB0, 'X'),
(0x1DF00, 'V'),
(0x1DF1F, 'X'),
(0x1DF25, 'V'),
(0x1DF2B, 'X'),
(0x1E000, 'V'),
(0x1E007, 'X'),
(0x1E008, 'V'),
(0x1E019, 'X'),
(0x1E01B, 'V'),
(0x1E022, 'X'),
(0x1E023, 'V'),
(0x1E025, 'X'),
(0x1E026, 'V'),
(0x1E02B, 'X'),
(0x1E030, 'M', 'а'),
(0x1E031, 'M', 'б'),
(0x1E032, 'M', 'в'),
]
def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1E033, 'M', 'г'),
(0x1E034, 'M', 'д'),
(0x1E035, 'M', 'е'),
(0x1E036, 'M', 'ж'),
(0x1E037, 'M', 'з'),
(0x1E038, 'M', 'и'),
(0x1E039, 'M', 'к'),
(0x1E03A, 'M', 'л'),
(0x1E03B, 'M', 'м'),
(0x1E03C, 'M', 'о'),
(0x1E03D, 'M', 'п'),
(0x1E03E, 'M', 'р'),
(0x1E03F, 'M', 'с'),
(0x1E040, 'M', 'т'),
(0x1E041, 'M', 'у'),
(0x1E042, 'M', 'ф'),
(0x1E043, 'M', 'х'),
(0x1E044, 'M', 'ц'),
(0x1E045, 'M', 'ч'),
(0x1E046, 'M', 'ш'),
(0x1E047, 'M', 'ы'),
(0x1E048, 'M', 'э'),
(0x1E049, 'M', 'ю'),
(0x1E04A, 'M', 'ꚉ'),
(0x1E04B, 'M', 'ә'),
(0x1E04C, 'M', 'і'),
(0x1E04D, 'M', 'ј'),
(0x1E04E, 'M', 'ө'),
(0x1E04F, 'M', 'ү'),
(0x1E050, 'M', 'ӏ'),
(0x1E051, 'M', 'а'),
(0x1E052, 'M', 'б'),
(0x1E053, 'M', 'в'),
(0x1E054, 'M', 'г'),
(0x1E055, 'M', 'д'),
(0x1E056, 'M', 'е'),
(0x1E057, 'M', 'ж'),
(0x1E058, 'M', 'з'),
(0x1E059, 'M', 'и'),
(0x1E05A, 'M', 'к'),
(0x1E05B, 'M', 'л'),
(0x1E05C, 'M', 'о'),
(0x1E05D, 'M', 'п'),
(0x1E05E, 'M', 'с'),
(0x1E05F, 'M', 'у'),
(0x1E060, 'M', 'ф'),
(0x1E061, 'M', 'х'),
(0x1E062, 'M', 'ц'),
(0x1E063, 'M', 'ч'),
(0x1E064, 'M', 'ш'),
(0x1E065, 'M', 'ъ'),
(0x1E066, 'M', 'ы'),
(0x1E067, 'M', 'ґ'),
(0x1E068, 'M', 'і'),
(0x1E069, 'M', 'ѕ'),
(0x1E06A, 'M', 'џ'),
(0x1E06B, 'M', 'ҫ'),
(0x1E06C, 'M', 'ꙑ'),
(0x1E06D, 'M', 'ұ'),
(0x1E06E, 'X'),
(0x1E08F, 'V'),
(0x1E090, 'X'),
(0x1E100, 'V'),
(0x1E12D, 'X'),
(0x1E130, 'V'),
(0x1E13E, 'X'),
(0x1E140, 'V'),
(0x1E14A, 'X'),
(0x1E14E, 'V'),
(0x1E150, 'X'),
(0x1E290, 'V'),
(0x1E2AF, 'X'),
(0x1E2C0, 'V'),
(0x1E2FA, 'X'),
(0x1E2FF, 'V'),
(0x1E300, 'X'),
(0x1E4D0, 'V'),
(0x1E4FA, 'X'),
(0x1E7E0, 'V'),
(0x1E7E7, 'X'),
(0x1E7E8, 'V'),
(0x1E7EC, 'X'),
(0x1E7ED, 'V'),
(0x1E7EF, 'X'),
(0x1E7F0, 'V'),
(0x1E7FF, 'X'),
(0x1E800, 'V'),
(0x1E8C5, 'X'),
(0x1E8C7, 'V'),
(0x1E8D7, 'X'),
(0x1E900, 'M', '𞤢'),
(0x1E901, 'M', '𞤣'),
(0x1E902, 'M', '𞤤'),
(0x1E903, 'M', '𞤥'),
(0x1E904, 'M', '𞤦'),
(0x1E905, 'M', '𞤧'),
(0x1E906, 'M', '𞤨'),
(0x1E907, 'M', '𞤩'),
(0x1E908, 'M', '𞤪'),
(0x1E909, 'M', '𞤫'),
]
def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1E90A, 'M', '𞤬'),
(0x1E90B, 'M', '𞤭'),
(0x1E90C, 'M', '𞤮'),
(0x1E90D, 'M', '𞤯'),
(0x1E90E, 'M', '𞤰'),
(0x1E90F, 'M', '𞤱'),
(0x1E910, 'M', '𞤲'),
(0x1E911, 'M', '𞤳'),
(0x1E912, 'M', '𞤴'),
(0x1E913, 'M', '𞤵'),
(0x1E914, 'M', '𞤶'),
(0x1E915, 'M', '𞤷'),
(0x1E916, 'M', '𞤸'),
(0x1E917, 'M', '𞤹'),
(0x1E918, 'M', '𞤺'),
(0x1E919, 'M', '𞤻'),
(0x1E91A, 'M', '𞤼'),
(0x1E91B, 'M', '𞤽'),
(0x1E91C, 'M', '𞤾'),
(0x1E91D, 'M', '𞤿'),
(0x1E91E, 'M', '𞥀'),
(0x1E91F, 'M', '𞥁'),
(0x1E920, 'M', '𞥂'),
(0x1E921, 'M', '𞥃'),
(0x1E922, 'V'),
(0x1E94C, 'X'),
(0x1E950, 'V'),
(0x1E95A, 'X'),
(0x1E95E, 'V'),
(0x1E960, 'X'),
(0x1EC71, 'V'),
(0x1ECB5, 'X'),
(0x1ED01, 'V'),
(0x1ED3E, 'X'),
(0x1EE00, 'M', 'ا'),
(0x1EE01, 'M', 'ب'),
(0x1EE02, 'M', 'ج'),
(0x1EE03, 'M', 'د'),
(0x1EE04, 'X'),
(0x1EE05, 'M', 'و'),
(0x1EE06, 'M', 'ز'),
(0x1EE07, 'M', 'ح'),
(0x1EE08, 'M', 'ط'),
(0x1EE09, 'M', 'ي'),
(0x1EE0A, 'M', 'ك'),
(0x1EE0B, 'M', 'ل'),
(0x1EE0C, 'M', 'م'),
(0x1EE0D, 'M', 'ن'),
(0x1EE0E, 'M', 'س'),
(0x1EE0F, 'M', 'ع'),
(0x1EE10, 'M', 'ف'),
(0x1EE11, 'M', 'ص'),
(0x1EE12, 'M', 'ق'),
(0x1EE13, 'M', 'ر'),
(0x1EE14, 'M', 'ش'),
(0x1EE15, 'M', 'ت'),
(0x1EE16, 'M', 'ث'),
(0x1EE17, 'M', 'خ'),
(0x1EE18, 'M', 'ذ'),
(0x1EE19, 'M', 'ض'),
(0x1EE1A, 'M', 'ظ'),
(0x1EE1B, 'M', 'غ'),
(0x1EE1C, 'M', 'ٮ'),
(0x1EE1D, 'M', 'ں'),
(0x1EE1E, 'M', 'ڡ'),
(0x1EE1F, 'M', 'ٯ'),
(0x1EE20, 'X'),
(0x1EE21, 'M', 'ب'),
(0x1EE22, 'M', 'ج'),
(0x1EE23, 'X'),
(0x1EE24, 'M', 'ه'),
(0x1EE25, 'X'),
(0x1EE27, 'M', 'ح'),
(0x1EE28, 'X'),
(0x1EE29, 'M', 'ي'),
(0x1EE2A, 'M', 'ك'),
(0x1EE2B, 'M', 'ل'),
(0x1EE2C, 'M', 'م'),
(0x1EE2D, 'M', 'ن'),
(0x1EE2E, 'M', 'س'),
(0x1EE2F, 'M', 'ع'),
(0x1EE30, 'M', 'ف'),
(0x1EE31, 'M', 'ص'),
(0x1EE32, 'M', 'ق'),
(0x1EE33, 'X'),
(0x1EE34, 'M', 'ش'),
(0x1EE35, 'M', 'ت'),
(0x1EE36, 'M', 'ث'),
(0x1EE37, 'M', 'خ'),
(0x1EE38, 'X'),
(0x1EE39, 'M', 'ض'),
(0x1EE3A, 'X'),
(0x1EE3B, 'M', 'غ'),
(0x1EE3C, 'X'),
(0x1EE42, 'M', 'ج'),
(0x1EE43, 'X'),
(0x1EE47, 'M', 'ح'),
(0x1EE48, 'X'),
(0x1EE49, 'M', 'ي'),
(0x1EE4A, 'X'),
]
def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1EE4B, 'M', 'ل'),
(0x1EE4C, 'X'),
(0x1EE4D, 'M', 'ن'),
(0x1EE4E, 'M', 'س'),
(0x1EE4F, 'M', 'ع'),
(0x1EE50, 'X'),
(0x1EE51, 'M', 'ص'),
(0x1EE52, 'M', 'ق'),
(0x1EE53, 'X'),
(0x1EE54, 'M', 'ش'),
(0x1EE55, 'X'),
(0x1EE57, 'M', 'خ'),
(0x1EE58, 'X'),
(0x1EE59, 'M', 'ض'),
(0x1EE5A, 'X'),
(0x1EE5B, 'M', 'غ'),
(0x1EE5C, 'X'),
(0x1EE5D, 'M', 'ں'),
(0x1EE5E, 'X'),
(0x1EE5F, 'M', 'ٯ'),
(0x1EE60, 'X'),
(0x1EE61, 'M', 'ب'),
(0x1EE62, 'M', 'ج'),
(0x1EE63, 'X'),
(0x1EE64, 'M', 'ه'),
(0x1EE65, 'X'),
(0x1EE67, 'M', 'ح'),
(0x1EE68, 'M', 'ط'),
(0x1EE69, 'M', 'ي'),
(0x1EE6A, 'M', 'ك'),
(0x1EE6B, 'X'),
(0x1EE6C, 'M', 'م'),
(0x1EE6D, 'M', 'ن'),
(0x1EE6E, 'M', 'س'),
(0x1EE6F, 'M', 'ع'),
(0x1EE70, 'M', 'ف'),
(0x1EE71, 'M', 'ص'),
(0x1EE72, 'M', 'ق'),
(0x1EE73, 'X'),
(0x1EE74, 'M', 'ش'),
(0x1EE75, 'M', 'ت'),
(0x1EE76, 'M', 'ث'),
(0x1EE77, 'M', 'خ'),
(0x1EE78, 'X'),
(0x1EE79, 'M', 'ض'),
(0x1EE7A, 'M', 'ظ'),
(0x1EE7B, 'M', 'غ'),
(0x1EE7C, 'M', 'ٮ'),
(0x1EE7D, 'X'),
(0x1EE7E, 'M', 'ڡ'),
(0x1EE7F, 'X'),
(0x1EE80, 'M', 'ا'),
(0x1EE81, 'M', 'ب'),
(0x1EE82, 'M', 'ج'),
(0x1EE83, 'M', 'د'),
(0x1EE84, 'M', 'ه'),
(0x1EE85, 'M', 'و'),
(0x1EE86, 'M', 'ز'),
(0x1EE87, 'M', 'ح'),
(0x1EE88, 'M', 'ط'),
(0x1EE89, 'M', 'ي'),
(0x1EE8A, 'X'),
(0x1EE8B, 'M', 'ل'),
(0x1EE8C, 'M', 'م'),
(0x1EE8D, 'M', 'ن'),
(0x1EE8E, 'M', 'س'),
(0x1EE8F, 'M', 'ع'),
(0x1EE90, 'M', 'ف'),
(0x1EE91, 'M', 'ص'),
(0x1EE92, 'M', 'ق'),
(0x1EE93, 'M', 'ر'),
(0x1EE94, 'M', 'ش'),
(0x1EE95, 'M', 'ت'),
(0x1EE96, 'M', 'ث'),
(0x1EE97, 'M', 'خ'),
(0x1EE98, 'M', 'ذ'),
(0x1EE99, 'M', 'ض'),
(0x1EE9A, 'M', 'ظ'),
(0x1EE9B, 'M', 'غ'),
(0x1EE9C, 'X'),
(0x1EEA1, 'M', 'ب'),
(0x1EEA2, 'M', 'ج'),
(0x1EEA3, 'M', 'د'),
(0x1EEA4, 'X'),
(0x1EEA5, 'M', 'و'),
(0x1EEA6, 'M', 'ز'),
(0x1EEA7, 'M', 'ح'),
(0x1EEA8, 'M', 'ط'),
(0x1EEA9, 'M', 'ي'),
(0x1EEAA, 'X'),
(0x1EEAB, 'M', 'ل'),
(0x1EEAC, 'M', 'م'),
(0x1EEAD, 'M', 'ن'),
(0x1EEAE, 'M', 'س'),
(0x1EEAF, 'M', 'ع'),
(0x1EEB0, 'M', 'ف'),
(0x1EEB1, 'M', 'ص'),
(0x1EEB2, 'M', 'ق'),
(0x1EEB3, 'M', 'ر'),
(0x1EEB4, 'M', 'ش'),
]
def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1EEB5, 'M', 'ت'),
(0x1EEB6, 'M', 'ث'),
(0x1EEB7, 'M', 'خ'),
(0x1EEB8, 'M', 'ذ'),
(0x1EEB9, 'M', 'ض'),
(0x1EEBA, 'M', 'ظ'),
(0x1EEBB, 'M', 'غ'),
(0x1EEBC, 'X'),
(0x1EEF0, 'V'),
(0x1EEF2, 'X'),
(0x1F000, 'V'),
(0x1F02C, 'X'),
(0x1F030, 'V'),
(0x1F094, 'X'),
(0x1F0A0, 'V'),
(0x1F0AF, 'X'),
(0x1F0B1, 'V'),
(0x1F0C0, 'X'),
(0x1F0C1, 'V'),
(0x1F0D0, 'X'),
(0x1F0D1, 'V'),
(0x1F0F6, 'X'),
(0x1F101, '3', '0,'),
(0x1F102, '3', '1,'),
(0x1F103, '3', '2,'),
(0x1F104, '3', '3,'),
(0x1F105, '3', '4,'),
(0x1F106, '3', '5,'),
(0x1F107, '3', '6,'),
(0x1F108, '3', '7,'),
(0x1F109, '3', '8,'),
(0x1F10A, '3', '9,'),
(0x1F10B, 'V'),
(0x1F110, '3', '(a)'),
(0x1F111, '3', '(b)'),
(0x1F112, '3', '(c)'),
(0x1F113, '3', '(d)'),
(0x1F114, '3', '(e)'),
(0x1F115, '3', '(f)'),
(0x1F116, '3', '(g)'),
(0x1F117, '3', '(h)'),
(0x1F118, '3', '(i)'),
(0x1F119, '3', '(j)'),
(0x1F11A, '3', '(k)'),
(0x1F11B, '3', '(l)'),
(0x1F11C, '3', '(m)'),
(0x1F11D, '3', '(n)'),
(0x1F11E, '3', '(o)'),
(0x1F11F, '3', '(p)'),
(0x1F120, '3', '(q)'),
(0x1F121, '3', '(r)'),
(0x1F122, '3', '(s)'),
(0x1F123, '3', '(t)'),
(0x1F124, '3', '(u)'),
(0x1F125, '3', '(v)'),
(0x1F126, '3', '(w)'),
(0x1F127, '3', '(x)'),
(0x1F128, '3', '(y)'),
(0x1F129, '3', '(z)'),
(0x1F12A, 'M', '〔s〕'),
(0x1F12B, 'M', 'c'),
(0x1F12C, 'M', 'r'),
(0x1F12D, 'M', 'cd'),
(0x1F12E, 'M', 'wz'),
(0x1F12F, 'V'),
(0x1F130, 'M', 'a'),
(0x1F131, 'M', 'b'),
(0x1F132, 'M', 'c'),
(0x1F133, 'M', 'd'),
(0x1F134, 'M', 'e'),
(0x1F135, 'M', 'f'),
(0x1F136, 'M', 'g'),
(0x1F137, 'M', 'h'),
(0x1F138, 'M', 'i'),
(0x1F139, 'M', 'j'),
(0x1F13A, 'M', 'k'),
(0x1F13B, 'M', 'l'),
(0x1F13C, 'M', 'm'),
(0x1F13D, 'M', 'n'),
(0x1F13E, 'M', 'o'),
(0x1F13F, 'M', 'p'),
(0x1F140, 'M', 'q'),
(0x1F141, 'M', 'r'),
(0x1F142, 'M', 's'),
(0x1F143, 'M', 't'),
(0x1F144, 'M', 'u'),
(0x1F145, 'M', 'v'),
(0x1F146, 'M', 'w'),
(0x1F147, 'M', 'x'),
(0x1F148, 'M', 'y'),
(0x1F149, 'M', 'z'),
(0x1F14A, 'M', 'hv'),
(0x1F14B, 'M', 'mv'),
(0x1F14C, 'M', 'sd'),
(0x1F14D, 'M', 'ss'),
(0x1F14E, 'M', 'ppv'),
(0x1F14F, 'M', 'wc'),
(0x1F150, 'V'),
(0x1F16A, 'M', 'mc'),
(0x1F16B, 'M', 'md'),
]
def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1F16C, 'M', 'mr'),
(0x1F16D, 'V'),
(0x1F190, 'M', 'dj'),
(0x1F191, 'V'),
(0x1F1AE, 'X'),
(0x1F1E6, 'V'),
(0x1F200, 'M', 'ほか'),
(0x1F201, 'M', 'ココ'),
(0x1F202, 'M', 'サ'),
(0x1F203, 'X'),
(0x1F210, 'M', '手'),
(0x1F211, 'M', '字'),
(0x1F212, 'M', '双'),
(0x1F213, 'M', 'デ'),
(0x1F214, 'M', '二'),
(0x1F215, 'M', '多'),
(0x1F216, 'M', '解'),
(0x1F217, 'M', '天'),
(0x1F218, 'M', '交'),
(0x1F219, 'M', '映'),
(0x1F21A, 'M', '無'),
(0x1F21B, 'M', '料'),
(0x1F21C, 'M', '前'),
(0x1F21D, 'M', '後'),
(0x1F21E, 'M', '再'),
(0x1F21F, 'M', '新'),
(0x1F220, 'M', '初'),
(0x1F221, 'M', '終'),
(0x1F222, 'M', '生'),
(0x1F223, 'M', '販'),
(0x1F224, 'M', '声'),
(0x1F225, 'M', '吹'),
(0x1F226, 'M', '演'),
(0x1F227, 'M', '投'),
(0x1F228, 'M', '捕'),
(0x1F229, 'M', '一'),
(0x1F22A, 'M', '三'),
(0x1F22B, 'M', '遊'),
(0x1F22C, 'M', '左'),
(0x1F22D, 'M', '中'),
(0x1F22E, 'M', '右'),
(0x1F22F, 'M', '指'),
(0x1F230, 'M', '走'),
(0x1F231, 'M', '打'),
(0x1F232, 'M', '禁'),
(0x1F233, 'M', '空'),
(0x1F234, 'M', '合'),
(0x1F235, 'M', '満'),
(0x1F236, 'M', '有'),
(0x1F237, 'M', '月'),
(0x1F238, 'M', '申'),
(0x1F239, 'M', '割'),
(0x1F23A, 'M', '営'),
(0x1F23B, 'M', '配'),
(0x1F23C, 'X'),
(0x1F240, 'M', '〔本〕'),
(0x1F241, 'M', '〔三〕'),
(0x1F242, 'M', '〔二〕'),
(0x1F243, 'M', '〔安〕'),
(0x1F244, 'M', '〔点〕'),
(0x1F245, 'M', '〔打〕'),
(0x1F246, 'M', '〔盗〕'),
(0x1F247, 'M', '〔勝〕'),
(0x1F248, 'M', '〔敗〕'),
(0x1F249, 'X'),
(0x1F250, 'M', '得'),
(0x1F251, 'M', '可'),
(0x1F252, 'X'),
(0x1F260, 'V'),
(0x1F266, 'X'),
(0x1F300, 'V'),
(0x1F6D8, 'X'),
(0x1F6DC, 'V'),
(0x1F6ED, 'X'),
(0x1F6F0, 'V'),
(0x1F6FD, 'X'),
(0x1F700, 'V'),
(0x1F777, 'X'),
(0x1F77B, 'V'),
(0x1F7DA, 'X'),
(0x1F7E0, 'V'),
(0x1F7EC, 'X'),
(0x1F7F0, 'V'),
(0x1F7F1, 'X'),
(0x1F800, 'V'),
(0x1F80C, 'X'),
(0x1F810, 'V'),
(0x1F848, 'X'),
(0x1F850, 'V'),
(0x1F85A, 'X'),
(0x1F860, 'V'),
(0x1F888, 'X'),
(0x1F890, 'V'),
(0x1F8AE, 'X'),
(0x1F8B0, 'V'),
(0x1F8B2, 'X'),
(0x1F900, 'V'),
(0x1FA54, 'X'),
(0x1FA60, 'V'),
(0x1FA6E, 'X'),
]
def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x1FA70, 'V'),
(0x1FA7D, 'X'),
(0x1FA80, 'V'),
(0x1FA89, 'X'),
(0x1FA90, 'V'),
(0x1FABE, 'X'),
(0x1FABF, 'V'),
(0x1FAC6, 'X'),
(0x1FACE, 'V'),
(0x1FADC, 'X'),
(0x1FAE0, 'V'),
(0x1FAE9, 'X'),
(0x1FAF0, 'V'),
(0x1FAF9, 'X'),
(0x1FB00, 'V'),
(0x1FB93, 'X'),
(0x1FB94, 'V'),
(0x1FBCB, 'X'),
(0x1FBF0, 'M', '0'),
(0x1FBF1, 'M', '1'),
(0x1FBF2, 'M', '2'),
(0x1FBF3, 'M', '3'),
(0x1FBF4, 'M', '4'),
(0x1FBF5, 'M', '5'),
(0x1FBF6, 'M', '6'),
(0x1FBF7, 'M', '7'),
(0x1FBF8, 'M', '8'),
(0x1FBF9, 'M', '9'),
(0x1FBFA, 'X'),
(0x20000, 'V'),
(0x2A6E0, 'X'),
(0x2A700, 'V'),
(0x2B73A, 'X'),
(0x2B740, 'V'),
(0x2B81E, 'X'),
(0x2B820, 'V'),
(0x2CEA2, 'X'),
(0x2CEB0, 'V'),
(0x2EBE1, 'X'),
(0x2F800, 'M', '丽'),
(0x2F801, 'M', '丸'),
(0x2F802, 'M', '乁'),
(0x2F803, 'M', '𠄢'),
(0x2F804, 'M', '你'),
(0x2F805, 'M', '侮'),
(0x2F806, 'M', '侻'),
(0x2F807, 'M', '倂'),
(0x2F808, 'M', '偺'),
(0x2F809, 'M', '備'),
(0x2F80A, 'M', '僧'),
(0x2F80B, 'M', '像'),
(0x2F80C, 'M', '㒞'),
(0x2F80D, 'M', '𠘺'),
(0x2F80E, 'M', '免'),
(0x2F80F, 'M', '兔'),
(0x2F810, 'M', '兤'),
(0x2F811, 'M', '具'),
(0x2F812, 'M', '𠔜'),
(0x2F813, 'M', '㒹'),
(0x2F814, 'M', '內'),
(0x2F815, 'M', '再'),
(0x2F816, 'M', '𠕋'),
(0x2F817, 'M', '冗'),
(0x2F818, 'M', '冤'),
(0x2F819, 'M', '仌'),
(0x2F81A, 'M', '冬'),
(0x2F81B, 'M', '况'),
(0x2F81C, 'M', '𩇟'),
(0x2F81D, 'M', '凵'),
(0x2F81E, 'M', '刃'),
(0x2F81F, 'M', '㓟'),
(0x2F820, 'M', '刻'),
(0x2F821, 'M', '剆'),
(0x2F822, 'M', '割'),
(0x2F823, 'M', '剷'),
(0x2F824, 'M', '㔕'),
(0x2F825, 'M', '勇'),
(0x2F826, 'M', '勉'),
(0x2F827, 'M', '勤'),
(0x2F828, 'M', '勺'),
(0x2F829, 'M', '包'),
(0x2F82A, 'M', '匆'),
(0x2F82B, 'M', '北'),
(0x2F82C, 'M', '卉'),
(0x2F82D, 'M', '卑'),
(0x2F82E, 'M', '博'),
(0x2F82F, 'M', '即'),
(0x2F830, 'M', '卽'),
(0x2F831, 'M', '卿'),
(0x2F834, 'M', '𠨬'),
(0x2F835, 'M', '灰'),
(0x2F836, 'M', '及'),
(0x2F837, 'M', '叟'),
(0x2F838, 'M', '𠭣'),
(0x2F839, 'M', '叫'),
(0x2F83A, 'M', '叱'),
(0x2F83B, 'M', '吆'),
(0x2F83C, 'M', '咞'),
(0x2F83D, 'M', '吸'),
(0x2F83E, 'M', '呈'),
]
def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2F83F, 'M', '周'),
(0x2F840, 'M', '咢'),
(0x2F841, 'M', '哶'),
(0x2F842, 'M', '唐'),
(0x2F843, 'M', '啓'),
(0x2F844, 'M', '啣'),
(0x2F845, 'M', '善'),
(0x2F847, 'M', '喙'),
(0x2F848, 'M', '喫'),
(0x2F849, 'M', '喳'),
(0x2F84A, 'M', '嗂'),
(0x2F84B, 'M', '圖'),
(0x2F84C, 'M', '嘆'),
(0x2F84D, 'M', '圗'),
(0x2F84E, 'M', '噑'),
(0x2F84F, 'M', '噴'),
(0x2F850, 'M', '切'),
(0x2F851, 'M', '壮'),
(0x2F852, 'M', '城'),
(0x2F853, 'M', '埴'),
(0x2F854, 'M', '堍'),
(0x2F855, 'M', '型'),
(0x2F856, 'M', '堲'),
(0x2F857, 'M', '報'),
(0x2F858, 'M', '墬'),
(0x2F859, 'M', '𡓤'),
(0x2F85A, 'M', '売'),
(0x2F85B, 'M', '壷'),
(0x2F85C, 'M', '夆'),
(0x2F85D, 'M', '多'),
(0x2F85E, 'M', '夢'),
(0x2F85F, 'M', '奢'),
(0x2F860, 'M', '𡚨'),
(0x2F861, 'M', '𡛪'),
(0x2F862, 'M', '姬'),
(0x2F863, 'M', '娛'),
(0x2F864, 'M', '娧'),
(0x2F865, 'M', '姘'),
(0x2F866, 'M', '婦'),
(0x2F867, 'M', '㛮'),
(0x2F868, 'X'),
(0x2F869, 'M', '嬈'),
(0x2F86A, 'M', '嬾'),
(0x2F86C, 'M', '𡧈'),
(0x2F86D, 'M', '寃'),
(0x2F86E, 'M', '寘'),
(0x2F86F, 'M', '寧'),
(0x2F870, 'M', '寳'),
(0x2F871, 'M', '𡬘'),
(0x2F872, 'M', '寿'),
(0x2F873, 'M', '将'),
(0x2F874, 'X'),
(0x2F875, 'M', '尢'),
(0x2F876, 'M', '㞁'),
(0x2F877, 'M', '屠'),
(0x2F878, 'M', '屮'),
(0x2F879, 'M', '峀'),
(0x2F87A, 'M', '岍'),
(0x2F87B, 'M', '𡷤'),
(0x2F87C, 'M', '嵃'),
(0x2F87D, 'M', '𡷦'),
(0x2F87E, 'M', '嵮'),
(0x2F87F, 'M', '嵫'),
(0x2F880, 'M', '嵼'),
(0x2F881, 'M', '巡'),
(0x2F882, 'M', '巢'),
(0x2F883, 'M', '㠯'),
(0x2F884, 'M', '巽'),
(0x2F885, 'M', '帨'),
(0x2F886, 'M', '帽'),
(0x2F887, 'M', '幩'),
(0x2F888, 'M', '㡢'),
(0x2F889, 'M', '𢆃'),
(0x2F88A, 'M', '㡼'),
(0x2F88B, 'M', '庰'),
(0x2F88C, 'M', '庳'),
(0x2F88D, 'M', '庶'),
(0x2F88E, 'M', '廊'),
(0x2F88F, 'M', '𪎒'),
(0x2F890, 'M', '廾'),
(0x2F891, 'M', '𢌱'),
(0x2F893, 'M', '舁'),
(0x2F894, 'M', '弢'),
(0x2F896, 'M', '㣇'),
(0x2F897, 'M', '𣊸'),
(0x2F898, 'M', '𦇚'),
(0x2F899, 'M', '形'),
(0x2F89A, 'M', '彫'),
(0x2F89B, 'M', '㣣'),
(0x2F89C, 'M', '徚'),
(0x2F89D, 'M', '忍'),
(0x2F89E, 'M', '志'),
(0x2F89F, 'M', '忹'),
(0x2F8A0, 'M', '悁'),
(0x2F8A1, 'M', '㤺'),
(0x2F8A2, 'M', '㤜'),
(0x2F8A3, 'M', '悔'),
(0x2F8A4, 'M', '𢛔'),
(0x2F8A5, 'M', '惇'),
(0x2F8A6, 'M', '慈'),
]
def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2F8A7, 'M', '慌'),
(0x2F8A8, 'M', '慎'),
(0x2F8A9, 'M', '慌'),
(0x2F8AA, 'M', '慺'),
(0x2F8AB, 'M', '憎'),
(0x2F8AC, 'M', '憲'),
(0x2F8AD, 'M', '憤'),
(0x2F8AE, 'M', '憯'),
(0x2F8AF, 'M', '懞'),
(0x2F8B0, 'M', '懲'),
(0x2F8B1, 'M', '懶'),
(0x2F8B2, 'M', '成'),
(0x2F8B3, 'M', '戛'),
(0x2F8B4, 'M', '扝'),
(0x2F8B5, 'M', '抱'),
(0x2F8B6, 'M', '拔'),
(0x2F8B7, 'M', '捐'),
(0x2F8B8, 'M', '𢬌'),
(0x2F8B9, 'M', '挽'),
(0x2F8BA, 'M', '拼'),
(0x2F8BB, 'M', '捨'),
(0x2F8BC, 'M', '掃'),
(0x2F8BD, 'M', '揤'),
(0x2F8BE, 'M', '𢯱'),
(0x2F8BF, 'M', '搢'),
(0x2F8C0, 'M', '揅'),
(0x2F8C1, 'M', '掩'),
(0x2F8C2, 'M', '㨮'),
(0x2F8C3, 'M', '摩'),
(0x2F8C4, 'M', '摾'),
(0x2F8C5, 'M', '撝'),
(0x2F8C6, 'M', '摷'),
(0x2F8C7, 'M', '㩬'),
(0x2F8C8, 'M', '敏'),
(0x2F8C9, 'M', '敬'),
(0x2F8CA, 'M', '𣀊'),
(0x2F8CB, 'M', '旣'),
(0x2F8CC, 'M', '書'),
(0x2F8CD, 'M', '晉'),
(0x2F8CE, 'M', '㬙'),
(0x2F8CF, 'M', '暑'),
(0x2F8D0, 'M', '㬈'),
(0x2F8D1, 'M', '㫤'),
(0x2F8D2, 'M', '冒'),
(0x2F8D3, 'M', '冕'),
(0x2F8D4, 'M', '最'),
(0x2F8D5, 'M', '暜'),
(0x2F8D6, 'M', '肭'),
(0x2F8D7, 'M', '䏙'),
(0x2F8D8, 'M', '朗'),
(0x2F8D9, 'M', '望'),
(0x2F8DA, 'M', '朡'),
(0x2F8DB, 'M', '杞'),
(0x2F8DC, 'M', '杓'),
(0x2F8DD, 'M', '𣏃'),
(0x2F8DE, 'M', '㭉'),
(0x2F8DF, 'M', '柺'),
(0x2F8E0, 'M', '枅'),
(0x2F8E1, 'M', '桒'),
(0x2F8E2, 'M', '梅'),
(0x2F8E3, 'M', '𣑭'),
(0x2F8E4, 'M', '梎'),
(0x2F8E5, 'M', '栟'),
(0x2F8E6, 'M', '椔'),
(0x2F8E7, 'M', '㮝'),
(0x2F8E8, 'M', '楂'),
(0x2F8E9, 'M', '榣'),
(0x2F8EA, 'M', '槪'),
(0x2F8EB, 'M', '檨'),
(0x2F8EC, 'M', '𣚣'),
(0x2F8ED, 'M', '櫛'),
(0x2F8EE, 'M', '㰘'),
(0x2F8EF, 'M', '次'),
(0x2F8F0, 'M', '𣢧'),
(0x2F8F1, 'M', '歔'),
(0x2F8F2, 'M', '㱎'),
(0x2F8F3, 'M', '歲'),
(0x2F8F4, 'M', '殟'),
(0x2F8F5, 'M', '殺'),
(0x2F8F6, 'M', '殻'),
(0x2F8F7, 'M', '𣪍'),
(0x2F8F8, 'M', '𡴋'),
(0x2F8F9, 'M', '𣫺'),
(0x2F8FA, 'M', '汎'),
(0x2F8FB, 'M', '𣲼'),
(0x2F8FC, 'M', '沿'),
(0x2F8FD, 'M', '泍'),
(0x2F8FE, 'M', '汧'),
(0x2F8FF, 'M', '洖'),
(0x2F900, 'M', '派'),
(0x2F901, 'M', '海'),
(0x2F902, 'M', '流'),
(0x2F903, 'M', '浩'),
(0x2F904, 'M', '浸'),
(0x2F905, 'M', '涅'),
(0x2F906, 'M', '𣴞'),
(0x2F907, 'M', '洴'),
(0x2F908, 'M', '港'),
(0x2F909, 'M', '湮'),
(0x2F90A, 'M', '㴳'),
]
def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2F90B, 'M', '滋'),
(0x2F90C, 'M', '滇'),
(0x2F90D, 'M', '𣻑'),
(0x2F90E, 'M', '淹'),
(0x2F90F, 'M', '潮'),
(0x2F910, 'M', '𣽞'),
(0x2F911, 'M', '𣾎'),
(0x2F912, 'M', '濆'),
(0x2F913, 'M', '瀹'),
(0x2F914, 'M', '瀞'),
(0x2F915, 'M', '瀛'),
(0x2F916, 'M', '㶖'),
(0x2F917, 'M', '灊'),
(0x2F918, 'M', '災'),
(0x2F919, 'M', '灷'),
(0x2F91A, 'M', '炭'),
(0x2F91B, 'M', '𠔥'),
(0x2F91C, 'M', '煅'),
(0x2F91D, 'M', '𤉣'),
(0x2F91E, 'M', '熜'),
(0x2F91F, 'X'),
(0x2F920, 'M', '爨'),
(0x2F921, 'M', '爵'),
(0x2F922, 'M', '牐'),
(0x2F923, 'M', '𤘈'),
(0x2F924, 'M', '犀'),
(0x2F925, 'M', '犕'),
(0x2F926, 'M', '𤜵'),
(0x2F927, 'M', '𤠔'),
(0x2F928, 'M', '獺'),
(0x2F929, 'M', '王'),
(0x2F92A, 'M', '㺬'),
(0x2F92B, 'M', '玥'),
(0x2F92C, 'M', '㺸'),
(0x2F92E, 'M', '瑇'),
(0x2F92F, 'M', '瑜'),
(0x2F930, 'M', '瑱'),
(0x2F931, 'M', '璅'),
(0x2F932, 'M', '瓊'),
(0x2F933, 'M', '㼛'),
(0x2F934, 'M', '甤'),
(0x2F935, 'M', '𤰶'),
(0x2F936, 'M', '甾'),
(0x2F937, 'M', '𤲒'),
(0x2F938, 'M', '異'),
(0x2F939, 'M', '𢆟'),
(0x2F93A, 'M', '瘐'),
(0x2F93B, 'M', '𤾡'),
(0x2F93C, 'M', '𤾸'),
(0x2F93D, 'M', '𥁄'),
(0x2F93E, 'M', '㿼'),
(0x2F93F, 'M', '䀈'),
(0x2F940, 'M', '直'),
(0x2F941, 'M', '𥃳'),
(0x2F942, 'M', '𥃲'),
(0x2F943, 'M', '𥄙'),
(0x2F944, 'M', '𥄳'),
(0x2F945, 'M', '眞'),
(0x2F946, 'M', '真'),
(0x2F948, 'M', '睊'),
(0x2F949, 'M', '䀹'),
(0x2F94A, 'M', '瞋'),
(0x2F94B, 'M', '䁆'),
(0x2F94C, 'M', '䂖'),
(0x2F94D, 'M', '𥐝'),
(0x2F94E, 'M', '硎'),
(0x2F94F, 'M', '碌'),
(0x2F950, 'M', '磌'),
(0x2F951, 'M', '䃣'),
(0x2F952, 'M', '𥘦'),
(0x2F953, 'M', '祖'),
(0x2F954, 'M', '𥚚'),
(0x2F955, 'M', '𥛅'),
(0x2F956, 'M', '福'),
(0x2F957, 'M', '秫'),
(0x2F958, 'M', '䄯'),
(0x2F959, 'M', '穀'),
(0x2F95A, 'M', '穊'),
(0x2F95B, 'M', '穏'),
(0x2F95C, 'M', '𥥼'),
(0x2F95D, 'M', '𥪧'),
(0x2F95F, 'X'),
(0x2F960, 'M', '䈂'),
(0x2F961, 'M', '𥮫'),
(0x2F962, 'M', '篆'),
(0x2F963, 'M', '築'),
(0x2F964, 'M', '䈧'),
(0x2F965, 'M', '𥲀'),
(0x2F966, 'M', '糒'),
(0x2F967, 'M', '䊠'),
(0x2F968, 'M', '糨'),
(0x2F969, 'M', '糣'),
(0x2F96A, 'M', '紀'),
(0x2F96B, 'M', '𥾆'),
(0x2F96C, 'M', '絣'),
(0x2F96D, 'M', '䌁'),
(0x2F96E, 'M', '緇'),
(0x2F96F, 'M', '縂'),
(0x2F970, 'M', '繅'),
(0x2F971, 'M', '䌴'),
]
def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2F972, 'M', '𦈨'),
(0x2F973, 'M', '𦉇'),
(0x2F974, 'M', '䍙'),
(0x2F975, 'M', '𦋙'),
(0x2F976, 'M', '罺'),
(0x2F977, 'M', '𦌾'),
(0x2F978, 'M', '羕'),
(0x2F979, 'M', '翺'),
(0x2F97A, 'M', '者'),
(0x2F97B, 'M', '𦓚'),
(0x2F97C, 'M', '𦔣'),
(0x2F97D, 'M', '聠'),
(0x2F97E, 'M', '𦖨'),
(0x2F97F, 'M', '聰'),
(0x2F980, 'M', '𣍟'),
(0x2F981, 'M', '䏕'),
(0x2F982, 'M', '育'),
(0x2F983, 'M', '脃'),
(0x2F984, 'M', '䐋'),
(0x2F985, 'M', '脾'),
(0x2F986, 'M', '媵'),
(0x2F987, 'M', '𦞧'),
(0x2F988, 'M', '𦞵'),
(0x2F989, 'M', '𣎓'),
(0x2F98A, 'M', '𣎜'),
(0x2F98B, 'M', '舁'),
(0x2F98C, 'M', '舄'),
(0x2F98D, 'M', '辞'),
(0x2F98E, 'M', '䑫'),
(0x2F98F, 'M', '芑'),
(0x2F990, 'M', '芋'),
(0x2F991, 'M', '芝'),
(0x2F992, 'M', '劳'),
(0x2F993, 'M', '花'),
(0x2F994, 'M', '芳'),
(0x2F995, 'M', '芽'),
(0x2F996, 'M', '苦'),
(0x2F997, 'M', '𦬼'),
(0x2F998, 'M', '若'),
(0x2F999, 'M', '茝'),
(0x2F99A, 'M', '荣'),
(0x2F99B, 'M', '莭'),
(0x2F99C, 'M', '茣'),
(0x2F99D, 'M', '莽'),
(0x2F99E, 'M', '菧'),
(0x2F99F, 'M', '著'),
(0x2F9A0, 'M', '荓'),
(0x2F9A1, 'M', '菊'),
(0x2F9A2, 'M', '菌'),
(0x2F9A3, 'M', '菜'),
(0x2F9A4, 'M', '𦰶'),
(0x2F9A5, 'M', '𦵫'),
(0x2F9A6, 'M', '𦳕'),
(0x2F9A7, 'M', '䔫'),
(0x2F9A8, 'M', '蓱'),
(0x2F9A9, 'M', '蓳'),
(0x2F9AA, 'M', '蔖'),
(0x2F9AB, 'M', '𧏊'),
(0x2F9AC, 'M', '蕤'),
(0x2F9AD, 'M', '𦼬'),
(0x2F9AE, 'M', '䕝'),
(0x2F9AF, 'M', '䕡'),
(0x2F9B0, 'M', '𦾱'),
(0x2F9B1, 'M', '𧃒'),
(0x2F9B2, 'M', '䕫'),
(0x2F9B3, 'M', '虐'),
(0x2F9B4, 'M', '虜'),
(0x2F9B5, 'M', '虧'),
(0x2F9B6, 'M', '虩'),
(0x2F9B7, 'M', '蚩'),
(0x2F9B8, 'M', '蚈'),
(0x2F9B9, 'M', '蜎'),
(0x2F9BA, 'M', '蛢'),
(0x2F9BB, 'M', '蝹'),
(0x2F9BC, 'M', '蜨'),
(0x2F9BD, 'M', '蝫'),
(0x2F9BE, 'M', '螆'),
(0x2F9BF, 'X'),
(0x2F9C0, 'M', '蟡'),
(0x2F9C1, 'M', '蠁'),
(0x2F9C2, 'M', '䗹'),
(0x2F9C3, 'M', '衠'),
(0x2F9C4, 'M', '衣'),
(0x2F9C5, 'M', '𧙧'),
(0x2F9C6, 'M', '裗'),
(0x2F9C7, 'M', '裞'),
(0x2F9C8, 'M', '䘵'),
(0x2F9C9, 'M', '裺'),
(0x2F9CA, 'M', '㒻'),
(0x2F9CB, 'M', '𧢮'),
(0x2F9CC, 'M', '𧥦'),
(0x2F9CD, 'M', '䚾'),
(0x2F9CE, 'M', '䛇'),
(0x2F9CF, 'M', '誠'),
(0x2F9D0, 'M', '諭'),
(0x2F9D1, 'M', '變'),
(0x2F9D2, 'M', '豕'),
(0x2F9D3, 'M', '𧲨'),
(0x2F9D4, 'M', '貫'),
(0x2F9D5, 'M', '賁'),
]
def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
return [
(0x2F9D6, 'M', '贛'),
(0x2F9D7, 'M', '起'),
(0x2F9D8, 'M', '𧼯'),
(0x2F9D9, 'M', '𠠄'),
(0x2F9DA, 'M', '跋'),
(0x2F9DB, 'M', '趼'),
(0x2F9DC, 'M', '跰'),
(0x2F9DD, 'M', '𠣞'),
(0x2F9DE, 'M', '軔'),
(0x2F9DF, 'M', '輸'),
(0x2F9E0, 'M', '𨗒'),
(0x2F9E1, 'M', '𨗭'),
(0x2F9E2, 'M', '邔'),
(0x2F9E3, 'M', '郱'),
(0x2F9E4, 'M', '鄑'),
(0x2F9E5, 'M', '𨜮'),
(0x2F9E6, 'M', '鄛'),
(0x2F9E7, 'M', '鈸'),
(0x2F9E8, 'M', '鋗'),
(0x2F9E9, 'M', '鋘'),
(0x2F9EA, 'M', '鉼'),
(0x2F9EB, 'M', '鏹'),
(0x2F9EC, 'M', '鐕'),
(0x2F9ED, 'M', '𨯺'),
(0x2F9EE, 'M', '開'),
(0x2F9EF, 'M', '䦕'),
(0x2F9F0, 'M', '閷'),
(0x2F9F1, 'M', '𨵷'),
(0x2F9F2, 'M', '䧦'),
(0x2F9F3, 'M', '雃'),
(0x2F9F4, 'M', '嶲'),
(0x2F9F5, 'M', '霣'),
(0x2F9F6, 'M', '𩅅'),
(0x2F9F7, 'M', '𩈚'),
(0x2F9F8, 'M', '䩮'),
(0x2F9F9, 'M', '䩶'),
(0x2F9FA, 'M', '韠'),
(0x2F9FB, 'M', '𩐊'),
(0x2F9FC, 'M', '䪲'),
(0x2F9FD, 'M', '𩒖'),
(0x2F9FE, 'M', '頋'),
(0x2FA00, 'M', '頩'),
(0x2FA01, 'M', '𩖶'),
(0x2FA02, 'M', '飢'),
(0x2FA03, 'M', '䬳'),
(0x2FA04, 'M', '餩'),
(0x2FA05, 'M', '馧'),
(0x2FA06, 'M', '駂'),
(0x2FA07, 'M', '駾'),
(0x2FA08, 'M', '䯎'),
(0x2FA09, 'M', '𩬰'),
(0x2FA0A, 'M', '鬒'),
(0x2FA0B, 'M', '鱀'),
(0x2FA0C, 'M', '鳽'),
(0x2FA0D, 'M', '䳎'),
(0x2FA0E, 'M', '䳭'),
(0x2FA0F, 'M', '鵧'),
(0x2FA10, 'M', '𪃎'),
(0x2FA11, 'M', '䳸'),
(0x2FA12, 'M', '𪄅'),
(0x2FA13, 'M', '𪈎'),
(0x2FA14, 'M', '𪊑'),
(0x2FA15, 'M', '麻'),
(0x2FA16, 'M', '䵖'),
(0x2FA17, 'M', '黹'),
(0x2FA18, 'M', '黾'),
(0x2FA19, 'M', '鼅'),
(0x2FA1A, 'M', '鼏'),
(0x2FA1B, 'M', '鼖'),
(0x2FA1C, 'M', '鼻'),
(0x2FA1D, 'M', '𪘀'),
(0x2FA1E, 'X'),
(0x30000, 'V'),
(0x3134B, 'X'),
(0x31350, 'V'),
(0x323B0, 'X'),
(0xE0100, 'I'),
(0xE01F0, 'X'),
]
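# The table is split across the _seg_N() helpers, presumably to keep any single
# function body and its constant pool small; the full sorted table is assembled
# below.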
uts46data = tuple(
_seg_0()
+ _seg_1()
+ _seg_2()
+ _seg_3()
+ _seg_4()
+ _seg_5()
+ _seg_6()
+ _seg_7()
+ _seg_8()
+ _seg_9()
+ _seg_10()
+ _seg_11()
+ _seg_12()
+ _seg_13()
+ _seg_14()
+ _seg_15()
+ _seg_16()
+ _seg_17()
+ _seg_18()
+ _seg_19()
+ _seg_20()
+ _seg_21()
+ _seg_22()
+ _seg_23()
+ _seg_24()
+ _seg_25()
+ _seg_26()
+ _seg_27()
+ _seg_28()
+ _seg_29()
+ _seg_30()
+ _seg_31()
+ _seg_32()
+ _seg_33()
+ _seg_34()
+ _seg_35()
+ _seg_36()
+ _seg_37()
+ _seg_38()
+ _seg_39()
+ _seg_40()
+ _seg_41()
+ _seg_42()
+ _seg_43()
+ _seg_44()
+ _seg_45()
+ _seg_46()
+ _seg_47()
+ _seg_48()
+ _seg_49()
+ _seg_50()
+ _seg_51()
+ _seg_52()
+ _seg_53()
+ _seg_54()
+ _seg_55()
+ _seg_56()
+ _seg_57()
+ _seg_58()
+ _seg_59()
+ _seg_60()
+ _seg_61()
+ _seg_62()
+ _seg_63()
+ _seg_64()
+ _seg_65()
+ _seg_66()
+ _seg_67()
+ _seg_68()
+ _seg_69()
+ _seg_70()
+ _seg_71()
+ _seg_72()
+ _seg_73()
+ _seg_74()
+ _seg_75()
+ _seg_76()
+ _seg_77()
+ _seg_78()
+ _seg_79()
+ _seg_80()
+ _seg_81()
) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...]
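# Lookup sketch (illustrative, not part of the generated data): rows are found
# by binary search on the start code point, mirroring how idna.core.uts46_remap
# consumes this table. The helper name `_lookup_example` is hypothetical.
import bisect

def _lookup_example(code_point):
    # type: (int) -> Union[Tuple[int, str], Tuple[int, str, str]]
    # 'Z' sorts after every real status letter, so bisect_left lands just past
    # the row that covers code_point; step back one entry to get that row.
    return uts46data[bisect.bisect_left(uts46data, (code_point, 'Z')) - 1]

# _lookup_example(0x1D400) -> (0x1D400, 'M', 'a'): math bold capital A maps to 'a'
# _lookup_example(0xE0105) -> (0xE0100, 'I'): variation selectors are ignored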
| 197,261 | Python | 21.934775 | 68 | 0.354708 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna/compat.py | from .core import *
from .codec import *
from typing import Any, Union
def ToASCII(label: str) -> bytes:
return encode(label)
def ToUnicode(label: Union[bytes, bytearray]) -> str:
return decode(label)
def nameprep(s: Any) -> None:
    raise NotImplementedError('IDNA 2008 does not utilise the nameprep protocol')
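# Usage sketch (illustrative): the IDNA 2003-style names simply wrap this
# package's IDNA 2008 encode/decode, so full domains round-trip through
# Punycode labels.
if __name__ == '__main__':
    assert ToASCII('ドメイン.テスト') == b'xn--eckwd4c7c.xn--zckzah'
    assert ToUnicode(b'xn--eckwd4c7c.xn--zckzah') == 'ドメイン.テスト'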
| 321 | Python | 21.999998 | 77 | 0.716511 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/envelope.py | import io
import json
import mimetypes
from sentry_sdk._compat import text_type, PY2
from sentry_sdk._types import MYPY
from sentry_sdk.session import Session
from sentry_sdk.utils import json_dumps, capture_internal_exceptions
if MYPY:
from typing import Any
from typing import Optional
from typing import Union
from typing import Dict
from typing import List
from typing import Iterator
from sentry_sdk._types import Event, EventDataCategory
def parse_json(data):
# type: (Union[bytes, text_type]) -> Any
    # some Python 3 versions cannot pass bytes straight to json.loads, so decode first
if not PY2 and isinstance(data, bytes):
data = data.decode("utf-8", "replace")
return json.loads(data)
class Envelope(object):
def __init__(
self,
headers=None, # type: Optional[Dict[str, Any]]
items=None, # type: Optional[List[Item]]
):
# type: (...) -> None
if headers is not None:
headers = dict(headers)
self.headers = headers or {}
if items is None:
items = []
else:
items = list(items)
self.items = items
@property
def description(self):
# type: (...) -> str
return "envelope with %s items (%s)" % (
len(self.items),
", ".join(x.data_category for x in self.items),
)
def add_event(
self, event # type: Event
):
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=event), type="event"))
def add_transaction(
self, transaction # type: Event
):
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=transaction), type="transaction"))
def add_profile(
self, profile # type: Any
):
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=profile), type="profile"))
def add_session(
self, session # type: Union[Session, Any]
):
# type: (...) -> None
if isinstance(session, Session):
session = session.to_json()
self.add_item(Item(payload=PayloadRef(json=session), type="session"))
def add_sessions(
self, sessions # type: Any
):
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=sessions), type="sessions"))
def add_item(
self, item # type: Item
):
# type: (...) -> None
self.items.append(item)
def get_event(self):
# type: (...) -> Optional[Event]
        for item in self.items:
            event = item.get_event()
if event is not None:
return event
return None
def get_transaction_event(self):
# type: (...) -> Optional[Event]
for item in self.items:
event = item.get_transaction_event()
if event is not None:
return event
return None
def __iter__(self):
# type: (...) -> Iterator[Item]
return iter(self.items)
def serialize_into(
self, f # type: Any
):
# type: (...) -> None
f.write(json_dumps(self.headers))
f.write(b"\n")
for item in self.items:
item.serialize_into(f)
def serialize(self):
# type: (...) -> bytes
out = io.BytesIO()
self.serialize_into(out)
return out.getvalue()
@classmethod
def deserialize_from(
cls, f # type: Any
):
# type: (...) -> Envelope
headers = parse_json(f.readline())
items = []
while 1:
item = Item.deserialize_from(f)
if item is None:
break
items.append(item)
return cls(headers=headers, items=items)
@classmethod
def deserialize(
cls, bytes # type: bytes
):
# type: (...) -> Envelope
return cls.deserialize_from(io.BytesIO(bytes))
def __repr__(self):
# type: (...) -> str
return "<Envelope headers=%r items=%r>" % (self.headers, self.items)
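# Wire-format note (added for illustration, not in the original source): an
# envelope serializes to newline-delimited JSON -- one header line for the
# envelope, then for each item a JSON header line (with a computed "length")
# followed by the raw payload bytes and a trailing newline, e.g.:
#
#     {"event_id": "abc123"}
#     {"type": "event", "content_type": "application/json", "length": 16}
#     {"message":"hi"}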
class PayloadRef(object):
def __init__(
self,
bytes=None, # type: Optional[bytes]
path=None, # type: Optional[Union[bytes, text_type]]
json=None, # type: Optional[Any]
):
# type: (...) -> None
self.json = json
self.bytes = bytes
self.path = path
def get_bytes(self):
# type: (...) -> bytes
if self.bytes is None:
if self.path is not None:
with capture_internal_exceptions():
with open(self.path, "rb") as f:
self.bytes = f.read()
elif self.json is not None:
self.bytes = json_dumps(self.json)
else:
self.bytes = b""
return self.bytes
@property
def inferred_content_type(self):
# type: (...) -> str
if self.json is not None:
return "application/json"
elif self.path is not None:
path = self.path
if isinstance(path, bytes):
path = path.decode("utf-8", "replace")
ty = mimetypes.guess_type(path)[0]
if ty:
return ty
return "application/octet-stream"
def __repr__(self):
# type: (...) -> str
return "<Payload %r>" % (self.inferred_content_type,)
class Item(object):
def __init__(
self,
payload, # type: Union[bytes, text_type, PayloadRef]
headers=None, # type: Optional[Dict[str, Any]]
type=None, # type: Optional[str]
content_type=None, # type: Optional[str]
filename=None, # type: Optional[str]
):
        if headers is not None:
            headers = dict(headers)
        else:
            headers = {}
        self.headers = headers
        if isinstance(payload, bytes):
            payload = PayloadRef(bytes=payload)
        elif isinstance(payload, text_type):
            payload = PayloadRef(bytes=payload.encode("utf-8"))
if filename is not None:
headers["filename"] = filename
if type is not None:
headers["type"] = type
if content_type is not None:
headers["content_type"] = content_type
elif "content_type" not in headers:
headers["content_type"] = payload.inferred_content_type
self.payload = payload
def __repr__(self):
# type: (...) -> str
return "<Item headers=%r payload=%r data_category=%r>" % (
self.headers,
self.payload,
self.data_category,
)
@property
def type(self):
# type: (...) -> Optional[str]
return self.headers.get("type")
@property
def data_category(self):
# type: (...) -> EventDataCategory
ty = self.headers.get("type")
if ty == "session":
return "session"
elif ty == "attachment":
return "attachment"
elif ty == "transaction":
return "transaction"
elif ty == "event":
return "error"
elif ty == "client_report":
return "internal"
elif ty == "profile":
return "profile"
else:
return "default"
def get_bytes(self):
# type: (...) -> bytes
return self.payload.get_bytes()
def get_event(self):
# type: (...) -> Optional[Event]
"""
Returns an error event if there is one.
"""
if self.type == "event" and self.payload.json is not None:
return self.payload.json
return None
def get_transaction_event(self):
# type: (...) -> Optional[Event]
if self.type == "transaction" and self.payload.json is not None:
return self.payload.json
return None
def serialize_into(
self, f # type: Any
):
# type: (...) -> None
headers = dict(self.headers)
bytes = self.get_bytes()
headers["length"] = len(bytes)
f.write(json_dumps(headers))
f.write(b"\n")
f.write(bytes)
f.write(b"\n")
def serialize(self):
# type: (...) -> bytes
out = io.BytesIO()
self.serialize_into(out)
return out.getvalue()
@classmethod
def deserialize_from(
cls, f # type: Any
):
# type: (...) -> Optional[Item]
line = f.readline().rstrip()
if not line:
return None
headers = parse_json(line)
length = headers.get("length")
if length is not None:
payload = f.read(length)
f.readline()
else:
# if no length was specified we need to read up to the end of line
# and remove it (if it is present, i.e. not the very last char in an eof terminated envelope)
payload = f.readline().rstrip(b"\n")
if headers.get("type") in ("event", "transaction", "metric_buckets"):
rv = cls(headers=headers, payload=PayloadRef(json=parse_json(payload)))
else:
rv = cls(headers=headers, payload=payload)
return rv
@classmethod
def deserialize(
cls, bytes # type: bytes
):
# type: (...) -> Optional[Item]
return cls.deserialize_from(io.BytesIO(bytes))
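# Minimal round-trip sketch (illustrative, not part of the original module);
# it relies only on the Envelope and Item APIs defined above:
#
#     envelope = Envelope(headers={"event_id": "abc123"})
#     envelope.add_event({"message": "hello"})
#     data = envelope.serialize()
#     restored = Envelope.deserialize(data)
#     assert restored.get_event() == {"message": "hello"}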
| 9,400 | Python | 27.837423 | 105 | 0.528936 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/tracing.py | import uuid
import random
import threading
import time
from datetime import datetime, timedelta
import sentry_sdk
from sentry_sdk.consts import INSTRUMENTER
from sentry_sdk.utils import logger
from sentry_sdk._types import MYPY
if MYPY:
import typing
from typing import Optional
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Iterator
import sentry_sdk.profiler
from sentry_sdk._types import Event, SamplingContext, MeasurementUnit
BAGGAGE_HEADER_NAME = "baggage"
SENTRY_TRACE_HEADER_NAME = "sentry-trace"
# Transaction source
# see https://develop.sentry.dev/sdk/event-payloads/transaction/#transaction-annotations
TRANSACTION_SOURCE_CUSTOM = "custom"
TRANSACTION_SOURCE_URL = "url"
TRANSACTION_SOURCE_ROUTE = "route"
TRANSACTION_SOURCE_VIEW = "view"
TRANSACTION_SOURCE_COMPONENT = "component"
TRANSACTION_SOURCE_TASK = "task"
# These are typically high cardinality and the server hates them
LOW_QUALITY_TRANSACTION_SOURCES = [
TRANSACTION_SOURCE_URL,
]
SOURCE_FOR_STYLE = {
"endpoint": TRANSACTION_SOURCE_COMPONENT,
"function_name": TRANSACTION_SOURCE_COMPONENT,
"handler_name": TRANSACTION_SOURCE_COMPONENT,
"method_and_path_pattern": TRANSACTION_SOURCE_ROUTE,
"path": TRANSACTION_SOURCE_URL,
"route_name": TRANSACTION_SOURCE_COMPONENT,
"route_pattern": TRANSACTION_SOURCE_ROUTE,
"uri_template": TRANSACTION_SOURCE_ROUTE,
"url": TRANSACTION_SOURCE_ROUTE,
}
class _SpanRecorder(object):
"""Limits the number of spans recorded in a transaction."""
__slots__ = ("maxlen", "spans")
def __init__(self, maxlen):
# type: (int) -> None
# FIXME: this is `maxlen - 1` only to preserve historical behavior
# enforced by tests.
# Either this should be changed to `maxlen` or the JS SDK implementation
# should be changed to match a consistent interpretation of what maxlen
# limits: either transaction+spans or only child spans.
self.maxlen = maxlen - 1
self.spans = [] # type: List[Span]
def add(self, span):
# type: (Span) -> None
if len(self.spans) > self.maxlen:
span._span_recorder = None
else:
self.spans.append(span)
class Span(object):
__slots__ = (
"trace_id",
"span_id",
"parent_span_id",
"same_process_as_parent",
"sampled",
"op",
"description",
"start_timestamp",
"_start_timestamp_monotonic",
"status",
"timestamp",
"_tags",
"_data",
"_span_recorder",
"hub",
"_context_manager_state",
"_containing_transaction",
)
def __new__(cls, **kwargs):
# type: (**Any) -> Any
"""
Backwards-compatible implementation of Span and Transaction
creation.
"""
# TODO: consider removing this in a future release.
# This is for backwards compatibility with releases before Transaction
# existed, to allow for a smoother transition.
if "transaction" in kwargs:
return object.__new__(Transaction)
return object.__new__(cls)
def __init__(
self,
trace_id=None, # type: Optional[str]
span_id=None, # type: Optional[str]
parent_span_id=None, # type: Optional[str]
same_process_as_parent=True, # type: bool
sampled=None, # type: Optional[bool]
op=None, # type: Optional[str]
description=None, # type: Optional[str]
hub=None, # type: Optional[sentry_sdk.Hub]
status=None, # type: Optional[str]
transaction=None, # type: Optional[str] # deprecated
containing_transaction=None, # type: Optional[Transaction]
start_timestamp=None, # type: Optional[datetime]
):
# type: (...) -> None
self.trace_id = trace_id or uuid.uuid4().hex
self.span_id = span_id or uuid.uuid4().hex[16:]
self.parent_span_id = parent_span_id
self.same_process_as_parent = same_process_as_parent
self.sampled = sampled
self.op = op
self.description = description
self.status = status
self.hub = hub
self._tags = {} # type: Dict[str, str]
self._data = {} # type: Dict[str, Any]
self._containing_transaction = containing_transaction
self.start_timestamp = start_timestamp or datetime.utcnow()
try:
# TODO: For Python 3.7+, we could use a clock with ns resolution:
# self._start_timestamp_monotonic = time.perf_counter_ns()
# Python 3.3+
self._start_timestamp_monotonic = time.perf_counter()
except AttributeError:
pass
#: End timestamp of span
self.timestamp = None # type: Optional[datetime]
self._span_recorder = None # type: Optional[_SpanRecorder]
# TODO this should really live on the Transaction class rather than the Span
# class
def init_span_recorder(self, maxlen):
# type: (int) -> None
if self._span_recorder is None:
self._span_recorder = _SpanRecorder(maxlen)
def __repr__(self):
# type: () -> str
return (
"<%s(op=%r, description:%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r)>"
% (
self.__class__.__name__,
self.op,
self.description,
self.trace_id,
self.span_id,
self.parent_span_id,
self.sampled,
)
)
def __enter__(self):
# type: () -> Span
hub = self.hub or sentry_sdk.Hub.current
_, scope = hub._stack[-1]
old_span = scope.span
scope.span = self
self._context_manager_state = (hub, scope, old_span)
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
if value is not None:
self.set_status("internal_error")
hub, scope, old_span = self._context_manager_state
del self._context_manager_state
self.finish(hub)
scope.span = old_span
@property
def containing_transaction(self):
# type: () -> Optional[Transaction]
# this is a getter rather than a regular attribute so that transactions
# can return `self` here instead (as a way to prevent them circularly
# referencing themselves)
return self._containing_transaction
def start_child(self, instrumenter=INSTRUMENTER.SENTRY, **kwargs):
# type: (str, **Any) -> Span
"""
Start a sub-span from the current span or transaction.
Takes the same arguments as the initializer of :py:class:`Span`. The
trace id, sampling decision, transaction pointer, and span recorder are
inherited from the current span/transaction.
"""
hub = self.hub or sentry_sdk.Hub.current
client = hub.client
configuration_instrumenter = client and client.options["instrumenter"]
if instrumenter != configuration_instrumenter:
return NoOpSpan()
kwargs.setdefault("sampled", self.sampled)
child = Span(
trace_id=self.trace_id,
parent_span_id=self.span_id,
containing_transaction=self.containing_transaction,
**kwargs
)
span_recorder = (
self.containing_transaction and self.containing_transaction._span_recorder
)
if span_recorder:
span_recorder.add(child)
return child
def new_span(self, **kwargs):
# type: (**Any) -> Span
"""Deprecated: use start_child instead."""
logger.warning("Deprecated: use Span.start_child instead of Span.new_span.")
return self.start_child(**kwargs)
@classmethod
def continue_from_environ(
cls,
environ, # type: typing.Mapping[str, str]
**kwargs # type: Any
):
# type: (...) -> Transaction
"""
Create a Transaction with the given params, then add in data pulled from
the 'sentry-trace', 'baggage' and 'tracestate' headers from the environ (if any)
before returning the Transaction.
This is different from `continue_from_headers` in that it assumes header
names in the form "HTTP_HEADER_NAME" - such as you would get from a wsgi
environ - rather than the form "header-name".
"""
if cls is Span:
logger.warning(
"Deprecated: use Transaction.continue_from_environ "
"instead of Span.continue_from_environ."
)
return Transaction.continue_from_headers(EnvironHeaders(environ), **kwargs)
@classmethod
def continue_from_headers(
cls,
headers, # type: typing.Mapping[str, str]
**kwargs # type: Any
):
# type: (...) -> Transaction
"""
Create a transaction with the given params (including any data pulled from
the 'sentry-trace', 'baggage' and 'tracestate' headers).
"""
# TODO move this to the Transaction class
if cls is Span:
logger.warning(
"Deprecated: use Transaction.continue_from_headers "
"instead of Span.continue_from_headers."
)
# TODO-neel move away from this kwargs stuff, it's confusing and opaque
# make more explicit
baggage = Baggage.from_incoming_header(headers.get(BAGGAGE_HEADER_NAME))
kwargs.update({BAGGAGE_HEADER_NAME: baggage})
sentrytrace_kwargs = extract_sentrytrace_data(
headers.get(SENTRY_TRACE_HEADER_NAME)
)
if sentrytrace_kwargs is not None:
kwargs.update(sentrytrace_kwargs)
# If there's an incoming sentry-trace but no incoming baggage header,
# for instance in traces coming from older SDKs,
# baggage will be empty and immutable and won't be populated as head SDK.
baggage.freeze()
kwargs.update(extract_tracestate_data(headers.get("tracestate")))
transaction = Transaction(**kwargs)
transaction.same_process_as_parent = False
return transaction
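    # Usage sketch (illustrative, not part of the original source): continuing
    # an incoming trace on a server, assuming `request.headers` is a mapping
    # of header names to values:
    #
    #     transaction = Transaction.continue_from_headers(
    #         request.headers, op="http.server", name="GET /items"
    #     )
    #     with sentry_sdk.start_transaction(transaction):
    #         ...  # handle the request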
def iter_headers(self):
# type: () -> Iterator[Tuple[str, str]]
"""
Creates a generator which returns the span's `sentry-trace`, `baggage` and
`tracestate` headers.
If the span's containing transaction doesn't yet have a
`sentry_tracestate` value, this will cause one to be generated and
stored.
"""
yield SENTRY_TRACE_HEADER_NAME, self.to_traceparent()
tracestate = self.to_tracestate() if has_tracestate_enabled(self) else None
# `tracestate` will only be `None` if there's no client or no DSN
# TODO (kmclb) the above will be true once the feature is no longer
# behind a flag
if tracestate:
yield "tracestate", tracestate
if self.containing_transaction:
baggage = self.containing_transaction.get_baggage().serialize()
if baggage:
yield BAGGAGE_HEADER_NAME, baggage
@classmethod
def from_traceparent(
cls,
traceparent, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Optional[Transaction]
"""
DEPRECATED: Use Transaction.continue_from_headers(headers, **kwargs)
Create a Transaction with the given params, then add in data pulled from
the given 'sentry-trace' header value before returning the Transaction.
"""
logger.warning(
"Deprecated: Use Transaction.continue_from_headers(headers, **kwargs) "
"instead of from_traceparent(traceparent, **kwargs)"
)
if not traceparent:
return None
return cls.continue_from_headers(
{SENTRY_TRACE_HEADER_NAME: traceparent}, **kwargs
)
def to_traceparent(self):
# type: () -> str
sampled = ""
if self.sampled is True:
sampled = "1"
if self.sampled is False:
sampled = "0"
return "%s-%s-%s" % (self.trace_id, self.span_id, sampled)
def to_tracestate(self):
# type: () -> Optional[str]
"""
Computes the `tracestate` header value using data from the containing
transaction.
If the containing transaction doesn't yet have a `sentry_tracestate`
value, this will cause one to be generated and stored.
If there is no containing transaction, a value will be generated but not
stored.
Returns None if there's no client and/or no DSN.
"""
sentry_tracestate = self.get_or_set_sentry_tracestate()
third_party_tracestate = (
self.containing_transaction._third_party_tracestate
if self.containing_transaction
else None
)
if not sentry_tracestate:
return None
header_value = sentry_tracestate
if third_party_tracestate:
header_value = header_value + "," + third_party_tracestate
return header_value
def get_or_set_sentry_tracestate(self):
# type: (Span) -> Optional[str]
"""
Read sentry tracestate off of the span's containing transaction.
If the transaction doesn't yet have a `_sentry_tracestate` value,
compute one and store it.
"""
transaction = self.containing_transaction
if transaction:
if not transaction._sentry_tracestate:
transaction._sentry_tracestate = compute_tracestate_entry(self)
return transaction._sentry_tracestate
# orphan span - nowhere to store the value, so just return it
return compute_tracestate_entry(self)
def set_tag(self, key, value):
# type: (str, Any) -> None
self._tags[key] = value
def set_data(self, key, value):
# type: (str, Any) -> None
self._data[key] = value
def set_status(self, value):
# type: (str) -> None
self.status = value
def set_http_status(self, http_status):
# type: (int) -> None
self.set_tag("http.status_code", str(http_status))
if http_status < 400:
self.set_status("ok")
elif 400 <= http_status < 500:
if http_status == 403:
self.set_status("permission_denied")
elif http_status == 404:
self.set_status("not_found")
elif http_status == 429:
self.set_status("resource_exhausted")
elif http_status == 413:
self.set_status("failed_precondition")
elif http_status == 401:
self.set_status("unauthenticated")
elif http_status == 409:
self.set_status("already_exists")
else:
self.set_status("invalid_argument")
elif 500 <= http_status < 600:
if http_status == 504:
self.set_status("deadline_exceeded")
elif http_status == 501:
self.set_status("unimplemented")
elif http_status == 503:
self.set_status("unavailable")
else:
self.set_status("internal_error")
else:
self.set_status("unknown_error")
def is_success(self):
# type: () -> bool
return self.status == "ok"
def finish(self, hub=None, end_timestamp=None):
# type: (Optional[sentry_sdk.Hub], Optional[datetime]) -> Optional[str]
# XXX: would be type: (Optional[sentry_sdk.Hub]) -> None, but that leads
# to incompatible return types for Span.finish and Transaction.finish.
if self.timestamp is not None:
# This span is already finished, ignore.
return None
hub = hub or self.hub or sentry_sdk.Hub.current
try:
if end_timestamp:
self.timestamp = end_timestamp
else:
duration_seconds = time.perf_counter() - self._start_timestamp_monotonic
self.timestamp = self.start_timestamp + timedelta(
seconds=duration_seconds
)
except AttributeError:
self.timestamp = datetime.utcnow()
maybe_create_breadcrumbs_from_span(hub, self)
return None
def to_json(self):
# type: () -> Dict[str, Any]
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"same_process_as_parent": self.same_process_as_parent,
"op": self.op,
"description": self.description,
"start_timestamp": self.start_timestamp,
"timestamp": self.timestamp,
} # type: Dict[str, Any]
if self.status:
self._tags["status"] = self.status
tags = self._tags
if tags:
rv["tags"] = tags
data = self._data
if data:
rv["data"] = data
return rv
def get_trace_context(self):
# type: () -> Any
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"op": self.op,
"description": self.description,
} # type: Dict[str, Any]
if self.status:
rv["status"] = self.status
# if the transaction didn't inherit a tracestate value, and no outgoing
# requests - whose need for headers would have caused a tracestate value
# to be created - were made as part of the transaction, the transaction
# still won't have a tracestate value, so compute one now
sentry_tracestate = self.get_or_set_sentry_tracestate()
if sentry_tracestate:
rv["tracestate"] = sentry_tracestate
if self.containing_transaction:
rv[
"dynamic_sampling_context"
] = self.containing_transaction.get_baggage().dynamic_sampling_context()
return rv
class Transaction(Span):
__slots__ = (
"name",
"source",
"parent_sampled",
# used to create baggage value for head SDKs in dynamic sampling
"sample_rate",
# the sentry portion of the `tracestate` header used to transmit
# correlation context for server-side dynamic sampling, of the form
# `sentry=xxxxx`, where `xxxxx` is the base64-encoded json of the
        # correlation context data, with any trailing `=` padding stripped
"_sentry_tracestate",
# tracestate data from other vendors, of the form `dogs=yes,cats=maybe`
"_third_party_tracestate",
"_measurements",
"_contexts",
"_profile",
"_baggage",
"_active_thread_id",
)
def __init__(
self,
name="", # type: str
parent_sampled=None, # type: Optional[bool]
sentry_tracestate=None, # type: Optional[str]
third_party_tracestate=None, # type: Optional[str]
baggage=None, # type: Optional[Baggage]
source=TRANSACTION_SOURCE_CUSTOM, # type: str
**kwargs # type: Any
):
# type: (...) -> None
# TODO: consider removing this in a future release.
# This is for backwards compatibility with releases before Transaction
# existed, to allow for a smoother transition.
if not name and "transaction" in kwargs:
logger.warning(
"Deprecated: use Transaction(name=...) to create transactions "
"instead of Span(transaction=...)."
)
name = kwargs.pop("transaction")
Span.__init__(self, **kwargs)
self.name = name
self.source = source
self.sample_rate = None # type: Optional[float]
self.parent_sampled = parent_sampled
# if tracestate isn't inherited and set here, it will get set lazily,
# either the first time an outgoing request needs it for a header or the
# first time an event needs it for inclusion in the captured data
self._sentry_tracestate = sentry_tracestate
self._third_party_tracestate = third_party_tracestate
self._measurements = {} # type: Dict[str, Any]
self._contexts = {} # type: Dict[str, Any]
self._profile = None # type: Optional[sentry_sdk.profiler.Profile]
self._baggage = baggage
# for profiling, we want to know on which thread a transaction is started
# to accurately show the active thread in the UI
self._active_thread_id = (
threading.current_thread().ident
) # used by profiling.py
def __repr__(self):
# type: () -> str
return (
"<%s(name=%r, op=%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r, source=%r)>"
% (
self.__class__.__name__,
self.name,
self.op,
self.trace_id,
self.span_id,
self.parent_span_id,
self.sampled,
self.source,
)
)
@property
def containing_transaction(self):
# type: () -> Transaction
# Transactions (as spans) belong to themselves (as transactions). This
# is a getter rather than a regular attribute to avoid having a circular
# reference.
return self
def finish(self, hub=None, end_timestamp=None):
# type: (Optional[sentry_sdk.Hub], Optional[datetime]) -> Optional[str]
if self.timestamp is not None:
# This transaction is already finished, ignore.
return None
hub = hub or self.hub or sentry_sdk.Hub.current
client = hub.client
if client is None:
# We have no client and therefore nowhere to send this transaction.
return None
# This is a de facto proxy for checking if sampled = False
if self._span_recorder is None:
logger.debug("Discarding transaction because sampled = False")
# This is not entirely accurate because discards here are not
# exclusively based on sample rate but also traces sampler, but
# we handle this the same here.
if client.transport and has_tracing_enabled(client.options):
client.transport.record_lost_event(
"sample_rate", data_category="transaction"
)
return None
if not self.name:
logger.warning(
"Transaction has no name, falling back to `<unlabeled transaction>`."
)
self.name = "<unlabeled transaction>"
Span.finish(self, hub, end_timestamp)
if not self.sampled:
# At this point a `sampled = None` should have already been resolved
# to a concrete decision.
if self.sampled is None:
logger.warning("Discarding transaction without sampling decision.")
return None
finished_spans = [
span.to_json()
for span in self._span_recorder.spans
if span.timestamp is not None
]
# we do this to break the circular reference of transaction -> span
# recorder -> span -> containing transaction (which is where we started)
# before either the spans or the transaction goes out of scope and has
# to be garbage collected
self._span_recorder = None
contexts = {}
contexts.update(self._contexts)
contexts.update({"trace": self.get_trace_context()})
event = {
"type": "transaction",
"transaction": self.name,
"transaction_info": {"source": self.source},
"contexts": contexts,
"tags": self._tags,
"timestamp": self.timestamp,
"start_timestamp": self.start_timestamp,
"spans": finished_spans,
} # type: Event
if hub.client is not None and self._profile is not None:
event["profile"] = self._profile
contexts.update({"profile": self._profile.get_profile_context()})
if has_custom_measurements_enabled():
event["measurements"] = self._measurements
return hub.capture_event(event)
def set_measurement(self, name, value, unit=""):
# type: (str, float, MeasurementUnit) -> None
if not has_custom_measurements_enabled():
logger.debug(
"[Tracing] Experimental custom_measurements feature is disabled"
)
return
self._measurements[name] = {"value": value, "unit": unit}
def set_context(self, key, value):
# type: (str, Any) -> None
self._contexts[key] = value
def to_json(self):
# type: () -> Dict[str, Any]
rv = super(Transaction, self).to_json()
rv["name"] = self.name
rv["source"] = self.source
rv["sampled"] = self.sampled
return rv
def get_baggage(self):
# type: () -> Baggage
"""
The first time a new baggage with sentry items is made,
it will be frozen.
"""
if not self._baggage or self._baggage.mutable:
self._baggage = Baggage.populate_from_transaction(self)
return self._baggage
def _set_initial_sampling_decision(self, sampling_context):
# type: (SamplingContext) -> None
"""
Sets the transaction's sampling decision, according to the following
precedence rules:
1. If a sampling decision is passed to `start_transaction`
(`start_transaction(name: "my transaction", sampled: True)`), that
decision will be used, regardless of anything else
2. If `traces_sampler` is defined, its decision will be used. It can
choose to keep or ignore any parent sampling decision, or use the
sampling context data to make its own decision or to choose a sample
rate for the transaction.
3. If `traces_sampler` is not defined, but there's a parent sampling
decision, the parent sampling decision will be used.
4. If `traces_sampler` is not defined and there's no parent sampling
decision, `traces_sample_rate` will be used.
"""
hub = self.hub or sentry_sdk.Hub.current
client = hub.client
options = (client and client.options) or {}
transaction_description = "{op}transaction <{name}>".format(
op=("<" + self.op + "> " if self.op else ""), name=self.name
)
# nothing to do if there's no client or if tracing is disabled
if not client or not has_tracing_enabled(options):
self.sampled = False
return
# if the user has forced a sampling decision by passing a `sampled`
# value when starting the transaction, go with that
if self.sampled is not None:
self.sample_rate = float(self.sampled)
return
# we would have bailed already if neither `traces_sampler` nor
# `traces_sample_rate` were defined, so one of these should work; prefer
# the hook if so
sample_rate = (
options["traces_sampler"](sampling_context)
if callable(options.get("traces_sampler"))
else (
# default inheritance behavior
sampling_context["parent_sampled"]
if sampling_context["parent_sampled"] is not None
else options["traces_sample_rate"]
)
)
# Since this is coming from the user (or from a function provided by the
# user), who knows what we might get. (The only valid values are
# booleans or numbers between 0 and 1.)
if not is_valid_sample_rate(sample_rate):
logger.warning(
"[Tracing] Discarding {transaction_description} because of invalid sample rate.".format(
transaction_description=transaction_description,
)
)
self.sampled = False
return
self.sample_rate = float(sample_rate)
# if the function returned 0 (or false), or if `traces_sample_rate` is
# 0, it's a sign the transaction should be dropped
if not sample_rate:
logger.debug(
"[Tracing] Discarding {transaction_description} because {reason}".format(
transaction_description=transaction_description,
reason=(
"traces_sampler returned 0 or False"
if callable(options.get("traces_sampler"))
else "traces_sample_rate is set to 0"
),
)
)
self.sampled = False
return
# Now we roll the dice. random.random is inclusive of 0, but not of 1,
# so strict < is safe here. In case sample_rate is a boolean, cast it
# to a float (True becomes 1.0 and False becomes 0.0)
self.sampled = random.random() < float(sample_rate)
if self.sampled:
logger.debug(
"[Tracing] Starting {transaction_description}".format(
transaction_description=transaction_description,
)
)
else:
logger.debug(
"[Tracing] Discarding {transaction_description} because it's not included in the random sample (sampling rate = {sample_rate})".format(
transaction_description=transaction_description,
sample_rate=float(sample_rate),
)
)
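# Illustrative sketch (not part of the original source): a `traces_sampler`
# passed to sentry_sdk.init() takes precedence over both the parent decision
# and `traces_sample_rate`, per the precedence rules documented above:
#
#     def traces_sampler(sampling_context):
#         # hypothetical policy: drop health checks, sample the rest at 20%
#         if sampling_context["transaction_context"].get("name") == "/health":
#             return 0
#         return 0.2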
class NoOpSpan(Span):
def __repr__(self):
# type: () -> str
return self.__class__.__name__
def start_child(self, instrumenter=INSTRUMENTER.SENTRY, **kwargs):
# type: (str, **Any) -> NoOpSpan
return NoOpSpan()
def new_span(self, **kwargs):
# type: (**Any) -> NoOpSpan
pass
def set_tag(self, key, value):
# type: (str, Any) -> None
pass
def set_data(self, key, value):
# type: (str, Any) -> None
pass
def set_status(self, value):
# type: (str) -> None
pass
def set_http_status(self, http_status):
# type: (int) -> None
pass
def finish(self, hub=None, end_timestamp=None):
# type: (Optional[sentry_sdk.Hub], Optional[datetime]) -> Optional[str]
pass
# Circular imports
from sentry_sdk.tracing_utils import (
Baggage,
EnvironHeaders,
compute_tracestate_entry,
extract_sentrytrace_data,
extract_tracestate_data,
has_tracestate_enabled,
has_tracing_enabled,
is_valid_sample_rate,
maybe_create_breadcrumbs_from_span,
has_custom_measurements_enabled,
)
| 31,209 | Python | 33.448124 | 151 | 0.581819 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/profiler.py | """
This file is originally based on code from https://github.com/nylas/nylas-perftools, which is published under the following license:
The MIT License (MIT)
Copyright (c) 2014 Nylas
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import atexit
import os
import platform
import random
import sys
import threading
import time
import uuid
from collections import deque
from contextlib import contextmanager
import sentry_sdk
from sentry_sdk._compat import PY33, PY311
from sentry_sdk._types import MYPY
from sentry_sdk.utils import (
filename_for_module,
handle_in_app_impl,
logger,
nanosecond_time,
)
if MYPY:
from types import FrameType
from typing import Any
from typing import Callable
from typing import Deque
from typing import Dict
from typing import Generator
from typing import List
from typing import Optional
from typing import Set
from typing import Sequence
from typing import Tuple
from typing_extensions import TypedDict
import sentry_sdk.tracing
ThreadId = str
# The exact value of this id is not very meaningful. The purpose
# of this id is to give us a compact and unique identifier for a
# raw stack that can be used as a key to a dictionary so that it
# can be used during the sampled format generation.
RawStackId = Tuple[int, int]
RawFrame = Tuple[
str, # abs_path
Optional[str], # module
Optional[str], # filename
str, # function
int, # lineno
]
RawStack = Tuple[RawFrame, ...]
RawSample = Sequence[Tuple[str, Tuple[RawStackId, RawStack]]]
ProcessedSample = TypedDict(
"ProcessedSample",
{
"elapsed_since_start_ns": str,
"thread_id": ThreadId,
"stack_id": int,
},
)
ProcessedStack = List[int]
ProcessedFrame = TypedDict(
"ProcessedFrame",
{
"abs_path": str,
"filename": Optional[str],
"function": str,
"lineno": int,
"module": Optional[str],
},
)
ProcessedThreadMetadata = TypedDict(
"ProcessedThreadMetadata",
{"name": str},
)
ProcessedProfile = TypedDict(
"ProcessedProfile",
{
"frames": List[ProcessedFrame],
"stacks": List[ProcessedStack],
"samples": List[ProcessedSample],
"thread_metadata": Dict[ThreadId, ProcessedThreadMetadata],
},
)
ProfileContext = TypedDict(
"ProfileContext",
{"profile_id": str},
)
try:
from gevent.monkey import is_module_patched # type: ignore
except ImportError:
def is_module_patched(*args, **kwargs):
# type: (*Any, **Any) -> bool
# unable to import from gevent means no modules have been patched
return False
_scheduler = None # type: Optional[Scheduler]
def setup_profiler(options):
# type: (Dict[str, Any]) -> None
"""
    Sets up the global sampling scheduler. `frequency` determines the number
    of samples to take per second (Hz); it is currently fixed at 101 Hz below.
"""
global _scheduler
if _scheduler is not None:
logger.debug("profiling is already setup")
return
if not PY33:
        logger.warning("profiling is only supported on Python >= 3.3")
return
frequency = 101
if is_module_patched("threading") or is_module_patched("_thread"):
# If gevent has patched the threading modules then we cannot rely on
# them to spawn a native thread for sampling.
# Instead we default to the GeventScheduler which is capable of
# spawning native threads within gevent.
default_profiler_mode = GeventScheduler.mode
else:
default_profiler_mode = ThreadScheduler.mode
profiler_mode = options["_experiments"].get("profiler_mode", default_profiler_mode)
if (
profiler_mode == ThreadScheduler.mode
# for legacy reasons, we'll keep supporting sleep mode for this scheduler
or profiler_mode == "sleep"
):
_scheduler = ThreadScheduler(frequency=frequency)
elif profiler_mode == GeventScheduler.mode:
try:
_scheduler = GeventScheduler(frequency=frequency)
except ImportError:
raise ValueError("Profiler mode: {} is not available".format(profiler_mode))
else:
raise ValueError("Unknown profiler mode: {}".format(profiler_mode))
_scheduler.setup()
atexit.register(teardown_profiler)
def teardown_profiler():
# type: () -> None
global _scheduler
if _scheduler is not None:
_scheduler.teardown()
_scheduler = None
# We want to impose a stack depth limit so that samples aren't too large.
MAX_STACK_DEPTH = 128
def extract_stack(
frame, # type: Optional[FrameType]
cwd, # type: str
prev_cache=None, # type: Optional[Tuple[RawStackId, RawStack, Deque[FrameType]]]
max_stack_depth=MAX_STACK_DEPTH, # type: int
):
# type: (...) -> Tuple[RawStackId, RawStack, Deque[FrameType]]
"""
Extracts the stack starting the specified frame. The extracted stack
assumes the specified frame is the top of the stack, and works back
to the bottom of the stack.
In the event that the stack is more than `MAX_STACK_DEPTH` frames deep,
only the first `MAX_STACK_DEPTH` frames will be returned.
"""
frames = deque(maxlen=max_stack_depth) # type: Deque[FrameType]
while frame is not None:
frames.append(frame)
frame = frame.f_back
if prev_cache is None:
stack = tuple(extract_frame(frame, cwd) for frame in frames)
else:
_, prev_stack, prev_frames = prev_cache
prev_depth = len(prev_frames)
depth = len(frames)
# We want to match the frame found in this sample to the frames found in the
# previous sample. If they are the same (using the `is` operator), we can
# skip the expensive work of extracting the frame information and reuse what
# we extracted during the last sample.
#
        # Make sure to keep in mind that the stack is ordered from the
        # innermost frame to the outermost frame, so be careful with the indexing.
stack = tuple(
prev_stack[i]
if i >= 0 and frame is prev_frames[i]
else extract_frame(frame, cwd)
for i, frame in zip(range(prev_depth - depth, prev_depth), frames)
)
# Instead of mapping the stack into frame ids and hashing
# that as a tuple, we can directly hash the stack.
# This saves us from having to generate yet another list.
# Additionally, using the stack as the key directly is
# costly because the stack can be large, so we pre-hash
# the stack, and use the hash as the key as this will be
# needed a few times to improve performance.
#
    # To reduce the likelihood of hash collisions, we include
# the stack depth. This means that only stacks of the same
# depth can suffer from hash collisions.
stack_id = len(stack), hash(stack)
return stack_id, stack, frames
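# Illustrative sketch (not part of the original source): sampling the current
# stack and threading the previous result back in as the cache so unchanged
# frames are not re-extracted:
#
#     cache = None
#     frame = sys._getframe()  # treat the caller's frame as the top of stack
#     stack_id, stack, frames = extract_stack(frame, os.getcwd(), cache)
#     cache = (stack_id, stack, frames)  # reuse on the next sample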
def extract_frame(frame, cwd):
# type: (FrameType, str) -> RawFrame
abs_path = frame.f_code.co_filename
try:
module = frame.f_globals["__name__"]
except Exception:
module = None
    # namedtuples can be many times slower when initializing
    # and accessing attributes, so we opt to use a tuple here instead
return (
# This originally was `os.path.abspath(abs_path)` but that had
# a large performance overhead.
#
# According to docs, this is equivalent to
# `os.path.normpath(os.path.join(os.getcwd(), path))`.
# The `os.getcwd()` call is slow here, so we precompute it.
#
# Additionally, since we are using normalized path already,
# we skip calling `os.path.normpath` entirely.
os.path.join(cwd, abs_path),
module,
filename_for_module(module, abs_path) or None,
get_frame_name(frame),
frame.f_lineno,
)
if PY311:
def get_frame_name(frame):
# type: (FrameType) -> str
return frame.f_code.co_qualname # type: ignore
else:
def get_frame_name(frame):
# type: (FrameType) -> str
f_code = frame.f_code
co_varnames = f_code.co_varnames
# co_name only contains the frame name. If the frame was a method,
# the class name will NOT be included.
name = f_code.co_name
# if it was a method, we can get the class name by inspecting
# the f_locals for the `self` argument
try:
if (
# the co_varnames start with the frame's positional arguments
                # and we expect the first to be `self` if it's an instance method
co_varnames
and co_varnames[0] == "self"
and "self" in frame.f_locals
):
for cls in frame.f_locals["self"].__class__.__mro__:
if name in cls.__dict__:
return "{}.{}".format(cls.__name__, name)
except AttributeError:
pass
# if it was a class method, (decorated with `@classmethod`)
# we can get the class name by inspecting the f_locals for the `cls` argument
try:
if (
# the co_varnames start with the frame's positional arguments
                # and we expect the first to be `cls` if it's a class method
co_varnames
and co_varnames[0] == "cls"
and "cls" in frame.f_locals
):
for cls in frame.f_locals["cls"].__mro__:
if name in cls.__dict__:
return "{}.{}".format(cls.__name__, name)
except AttributeError:
pass
# nothing we can do if it is a staticmethod (decorated with @staticmethod)
# we've done all we can, time to give up and return what we have
return name
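# Example (illustrative): for `def run(self): ...` defined on a class Worker,
# the pre-3.11 fallback above resolves the name to "Worker.run" by walking
# self's MRO, while on Python 3.11+ `co_qualname` yields it directly.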
MAX_PROFILE_DURATION_NS = int(3e10) # 30 seconds
class Profile(object):
def __init__(
self,
scheduler, # type: Scheduler
transaction, # type: sentry_sdk.tracing.Transaction
hub=None, # type: Optional[sentry_sdk.Hub]
):
# type: (...) -> None
self.scheduler = scheduler
self.transaction = transaction
self.hub = hub
self.active_thread_id = None # type: Optional[int]
self.start_ns = 0 # type: int
self.stop_ns = 0 # type: int
self.active = False # type: bool
self.event_id = uuid.uuid4().hex # type: str
self.indexed_frames = {} # type: Dict[RawFrame, int]
self.indexed_stacks = {} # type: Dict[RawStackId, int]
self.frames = [] # type: List[ProcessedFrame]
self.stacks = [] # type: List[ProcessedStack]
self.samples = [] # type: List[ProcessedSample]
transaction._profile = self
def get_profile_context(self):
# type: () -> ProfileContext
return {"profile_id": self.event_id}
def __enter__(self):
# type: () -> None
hub = self.hub or sentry_sdk.Hub.current
_, scope = hub._stack[-1]
old_profile = scope.profile
scope.profile = self
self._context_manager_state = (hub, scope, old_profile)
self.start_ns = nanosecond_time()
self.scheduler.start_profiling(self)
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
self.scheduler.stop_profiling(self)
self.stop_ns = nanosecond_time()
_, scope, old_profile = self._context_manager_state
del self._context_manager_state
scope.profile = old_profile
def write(self, ts, sample):
# type: (int, RawSample) -> None
if ts < self.start_ns:
return
offset = ts - self.start_ns
if offset > MAX_PROFILE_DURATION_NS:
return
elapsed_since_start_ns = str(offset)
for tid, (stack_id, stack) in sample:
# Check if the stack is indexed first, this lets us skip
# indexing frames if it's not necessary
if stack_id not in self.indexed_stacks:
for frame in stack:
if frame not in self.indexed_frames:
self.indexed_frames[frame] = len(self.indexed_frames)
self.frames.append(
{
"abs_path": frame[0],
"module": frame[1],
"filename": frame[2],
"function": frame[3],
"lineno": frame[4],
}
)
self.indexed_stacks[stack_id] = len(self.indexed_stacks)
self.stacks.append([self.indexed_frames[frame] for frame in stack])
self.samples.append(
{
"elapsed_since_start_ns": elapsed_since_start_ns,
"thread_id": tid,
"stack_id": self.indexed_stacks[stack_id],
}
)
def process(self):
# type: () -> ProcessedProfile
# This collects the thread metadata at the end of a profile. Doing it
# this way means that any threads that terminate before the profile ends
        # will not have any metadata associated with them.
thread_metadata = {
str(thread.ident): {
"name": str(thread.name),
}
for thread in threading.enumerate()
} # type: Dict[str, ProcessedThreadMetadata]
return {
"frames": self.frames,
"stacks": self.stacks,
"samples": self.samples,
"thread_metadata": thread_metadata,
}
def to_json(self, event_opt, options):
# type: (Any, Dict[str, Any]) -> Dict[str, Any]
profile = self.process()
handle_in_app_impl(
profile["frames"], options["in_app_exclude"], options["in_app_include"]
)
return {
"environment": event_opt.get("environment"),
"event_id": self.event_id,
"platform": "python",
"profile": profile,
"release": event_opt.get("release", ""),
"timestamp": event_opt["timestamp"],
"version": "1",
"device": {
"architecture": platform.machine(),
},
"os": {
"name": platform.system(),
"version": platform.release(),
},
"runtime": {
"name": platform.python_implementation(),
"version": platform.python_version(),
},
"transactions": [
{
"id": event_opt["event_id"],
"name": self.transaction.name,
# we start the transaction before the profile and this is
# the transaction start time relative to the profile, so we
# hardcode it to 0 until we can start the profile before
"relative_start_ns": "0",
# use the duration of the profile instead of the transaction
# because we end the transaction after the profile
"relative_end_ns": str(self.stop_ns - self.start_ns),
"trace_id": self.transaction.trace_id,
"active_thread_id": str(
self.transaction._active_thread_id
if self.active_thread_id is None
else self.active_thread_id
),
}
],
}
class Scheduler(object):
mode = "unknown"
def __init__(self, frequency):
# type: (int) -> None
self.interval = 1.0 / frequency
self.sampler = self.make_sampler()
self.new_profiles = deque() # type: Deque[Profile]
self.active_profiles = set() # type: Set[Profile]
def __enter__(self):
# type: () -> Scheduler
self.setup()
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
self.teardown()
def setup(self):
# type: () -> None
raise NotImplementedError
def teardown(self):
# type: () -> None
raise NotImplementedError
def start_profiling(self, profile):
# type: (Profile) -> None
profile.active = True
self.new_profiles.append(profile)
def stop_profiling(self, profile):
# type: (Profile) -> None
profile.active = False
def make_sampler(self):
# type: () -> Callable[..., None]
cwd = os.getcwd()
# In Python3+, we can use the `nonlocal` keyword to rebind the value,
# but this is not possible in Python2. To get around this, we wrap
# the value in a list to allow updating this value each sample.
last_sample = [
{}
] # type: List[Dict[int, Tuple[RawStackId, RawStack, Deque[FrameType]]]]
def _sample_stack(*args, **kwargs):
# type: (*Any, **Any) -> None
"""
Take a sample of the stack on all the threads in the process.
This should be called at a regular interval to collect samples.
"""
# no profiles taking place, so we can stop early
if not self.new_profiles and not self.active_profiles:
            # make sure to clear the cache if we're not profiling so we don't
# keep a reference to the last stack of frames around
last_sample[0] = {}
return
# This is the number of profiles we want to pop off.
# It's possible another thread adds a new profile to
# the list and we spend longer than we want inside
# the loop below.
#
# Also make sure to set this value before extracting
# frames so we do not write to any new profiles that
# were started after this point.
new_profiles = len(self.new_profiles)
now = nanosecond_time()
raw_sample = {
tid: extract_stack(frame, cwd, last_sample[0].get(tid))
for tid, frame in sys._current_frames().items()
}
# make sure to update the last sample so the cache has
# the most recent stack for better cache hits
last_sample[0] = raw_sample
sample = [
(str(tid), (stack_id, stack))
for tid, (stack_id, stack, _) in raw_sample.items()
]
# Move the new profiles into the active_profiles set.
#
            # We cannot directly add to the active_profiles set
            # in `start_profiling` because it is called from other
            # threads, which can cause a RuntimeError when the
            # set size changes during iteration without a lock.
#
# We also want to avoid using a lock here so threads
# that are starting profiles are not blocked until it
# can acquire the lock.
for _ in range(new_profiles):
self.active_profiles.add(self.new_profiles.popleft())
inactive_profiles = []
for profile in self.active_profiles:
if profile.active:
profile.write(now, sample)
else:
# If a thread is marked inactive, we buffer it
# to `inactive_profiles` so it can be removed.
# We cannot remove it here as it would result
# in a RuntimeError.
inactive_profiles.append(profile)
for profile in inactive_profiles:
self.active_profiles.remove(profile)
return _sample_stack
class ThreadScheduler(Scheduler):
"""
This scheduler is based on running a daemon thread that will call
the sampler at a regular interval.
"""
mode = "thread"
name = "sentry.profiler.ThreadScheduler"
def __init__(self, frequency):
# type: (int) -> None
super(ThreadScheduler, self).__init__(frequency=frequency)
# used to signal to the thread that it should stop
self.event = threading.Event()
# make sure the thread is a daemon here otherwise this
# can keep the application running after other threads
# have exited
self.thread = threading.Thread(name=self.name, target=self.run, daemon=True)
def setup(self):
# type: () -> None
self.thread.start()
def teardown(self):
# type: () -> None
self.event.set()
self.thread.join()
def run(self):
# type: () -> None
last = time.perf_counter()
while True:
if self.event.is_set():
break
self.sampler()
# some time may have elapsed since the last time
# we sampled, so we need to account for that and
# not sleep for too long
elapsed = time.perf_counter() - last
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
# after sleeping, make sure to take the current
# timestamp so we can use it next iteration
last = time.perf_counter()
class GeventScheduler(Scheduler):
"""
This scheduler is based on the thread scheduler but adapted to work with
gevent. When using gevent, it may monkey patch the threading modules
(`threading` and `_thread`). This results in the use of greenlets instead
of native threads.
This is an issue because the sampler CANNOT run in a greenlet because
1. Other greenlets doing sync work will prevent the sampler from running
2. The greenlet runs in the same thread as other greenlets so when taking
a sample, other greenlets will have been evicted from the thread. This
results in a sample containing only the sampler's code.
"""
mode = "gevent"
name = "sentry.profiler.GeventScheduler"
def __init__(self, frequency):
# type: (int) -> None
# This can throw an ImportError that must be caught if `gevent` is
# not installed.
from gevent.threadpool import ThreadPool # type: ignore
super(GeventScheduler, self).__init__(frequency=frequency)
# used to signal to the thread that it should stop
self.event = threading.Event()
# Using gevent's ThreadPool allows us to bypass greenlets and spawn
# native threads.
self.pool = ThreadPool(1)
def setup(self):
# type: () -> None
self.pool.spawn(self.run)
def teardown(self):
# type: () -> None
self.event.set()
self.pool.join()
def run(self):
# type: () -> None
last = time.perf_counter()
while True:
if self.event.is_set():
break
self.sampler()
# some time may have elapsed since the last time
# we sampled, so we need to account for that and
# not sleep for too long
elapsed = time.perf_counter() - last
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
# after sleeping, make sure to take the current
# timestamp so we can use it next iteration
last = time.perf_counter()
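# Standalone sketch of the drift-compensated sampling loop shared by both
# schedulers above (illustrative; `sample`, `stop` and `interval` stand in
# for self.sampler, self.event and self.interval):
#
#     def loop(sample, stop, interval):
#         last = time.perf_counter()
#         while not stop.is_set():
#             sample()
#             # subtract time spent sampling so the tick rate does not drift
#             elapsed = time.perf_counter() - last
#             if elapsed < interval:
#                 time.sleep(interval - elapsed)
#             last = time.perf_counter()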
def _should_profile(transaction, hub):
# type: (sentry_sdk.tracing.Transaction, sentry_sdk.Hub) -> bool
# The corresponding transaction was not sampled,
# so don't generate a profile for it.
if not transaction.sampled:
return False
# The profiler hasn't been properly initialized.
if _scheduler is None:
return False
client = hub.client
# The client is None, so we can't get the sample rate.
if client is None:
return False
options = client.options
profiles_sample_rate = options["_experiments"].get("profiles_sample_rate")
# The profiles_sample_rate option was not set, so profiling
# was never enabled.
if profiles_sample_rate is None:
return False
return random.random() < float(profiles_sample_rate)
@contextmanager
def start_profiling(transaction, hub=None):
# type: (sentry_sdk.tracing.Transaction, Optional[sentry_sdk.Hub]) -> Generator[None, None, None]
hub = hub or sentry_sdk.Hub.current
# if profiling was not enabled, this should be a noop
if _should_profile(transaction, hub):
assert _scheduler is not None
with Profile(_scheduler, transaction, hub):
yield
else:
yield
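# Usage sketch (illustrative): profiling is normally driven from inside
# start_transaction, but a manual equivalent, assuming the SDK was initialized
# with _experiments={"profiles_sample_rate": 1.0}, looks like:
#
#     with sentry_sdk.start_transaction(op="task", name="rebuild-index") as txn:
#         with start_profiling(txn):
#             do_work()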
| 26,041 | Python | 32.733161 | 460 | 0.585423 |