Dataset schema (column, type, value range):

column           type            values / lengths
library          stringclasses   1 value
test_file        stringclasses   785 values
test_function    stringlengths   1 to 295
before           stringlengths   0 to 448k
after            stringlengths   0 to 487k
context_before   stringclasses   947 values
context_after    stringlengths   0 to 16.3k
commit_before    stringclasses   1 value
commit_after     stringclasses   1 value
change_type      stringclasses   3 values
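For orientation, a minimal sketch of loading and inspecting a dataset with this schema via the Hugging Face datasets library. The dataset path "user/torch-test-migrations" is a placeholder, not the real identifier; the column names come from the schema above.

import datasets

# Placeholder repository id; substitute the actual one.
ds = datasets.load_dataset("user/torch-test-migrations", split="train")

row = ds[0]
print(row["library"], row["test_file"], row["test_function"])
print(row["change_type"])    # e.g. "added" for the rows shown below
print(row["after"][:200])    # test source is stored as a flat string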
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_symmetric_rank
def test_symmetric_rank(self):
    assert_equal(4, matrix_rank(np.eye(4), hermitian=True))
    assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True))
    assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True))
    # rank deficient matrix
    I = np.eye(4)
    I[-1, -1] = 0.0
    assert_equal(3, matrix_rank(I, hermitian=True))
    # manually supplied tolerance
    I[-1, -1] = 1e-8
    assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8))
    assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8))
import functools
import itertools
import os
import subprocess
import sys
import textwrap
import traceback
from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest

import numpy
import pytest
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from pytest import raises as assert_raises
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    slowTest as slow,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
    xpassIfTorchDynamo,
)

import numpy as np
from numpy import (
    array, asarray, atleast_2d, cdouble, csingle, dot, double, identity,
    inf, linalg, matmul, single, swapaxes,
)
from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm
from numpy.testing import (  # assert_raises_regex, HAS_LAPACK64, IS_WASM
    assert_, assert_allclose, assert_almost_equal, assert_array_equal,
    assert_equal, suppress_warnings,
)

import torch._numpy as np
from torch._numpy import (
    array, asarray, atleast_2d, cdouble, csingle, dot, double, identity,
    inf, linalg, matmul, single, swapaxes,
)
from torch._numpy.linalg import (
    LinAlgError, matrix_power, matrix_rank, multi_dot, norm,
)
from torch._numpy.testing import (
    assert_, assert_allclose, assert_almost_equal, assert_array_equal,
    assert_equal, suppress_warnings,
)

skip = functools.partial(skipif, True)

IS_WASM = False
HAS_LAPACK64 = False

old_assert_almost_equal = assert_almost_equal

all_tags = {
    "square", "nonsquare", "hermitian",  # mutually exclusive
    "generalized", "size-0", "strided",  # optional additions
}

CASES = []

class TestMatrixRank(TestCase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
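The tolerance behaviour pinned down above follows from how matrix_rank is defined: the rank is the count of singular values exceeding tol. A small illustrative sketch in plain NumPy:

import numpy as np

I = np.eye(4)
I[-1, -1] = 1e-8  # nearly rank-deficient

s = np.linalg.svd(I, compute_uv=False)  # singular values [1, 1, 1, 1e-8]
print(np.sum(s > 0.99e-8))  # 4: the 1e-8 singular value still counts
print(np.sum(s > 1.01e-8))  # 3: it now falls below the tolerance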
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_reduced_rank
def test_reduced_rank(self):
    # Test matrices with reduced rank
    # rng = np.random.RandomState(20120714)
    np.random.seed(20120714)
    for i in range(100):
        # Make a rank deficient matrix
        X = np.random.normal(size=(40, 10))
        X[:, 0] = X[:, 1] + X[:, 2]
        # Assert that matrix_rank detected deficiency
        assert_equal(matrix_rank(X), 9)
        X[:, 3] = X[:, 4] + X[:, 5]
        assert_equal(matrix_rank(X), 8)
(same module preamble as in the first row)
class TestMatrixRank(TestCase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_mode_raw
def test_mode_raw(self):
    # The factorization is not unique and varies between libraries,
    # so it is not possible to check against known values. Functional
    # testing is a possibility, but awaits the exposure of more
    # of the functions in lapack_lite. Consequently, this test is
    # very limited in scope. Note that the results are in FORTRAN
    # order, hence the h arrays are transposed.
    a = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)

    # Test double
    h, tau = linalg.qr(a, mode="raw")
    assert_(h.dtype == np.double)
    assert_(tau.dtype == np.double)
    assert_(h.shape == (2, 3))
    assert_(tau.shape == (2,))

    h, tau = linalg.qr(a.T, mode="raw")
    assert_(h.dtype == np.double)
    assert_(tau.dtype == np.double)
    assert_(h.shape == (3, 2))
    assert_(tau.shape == (2,))
(same module preamble as in the first row)
@instantiate_parametrized_tests
class TestQR(TestCase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
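For contrast with mode="raw", a sketch of the shapes the other qr modes return in plain NumPy; only shape relationships are shown, since the factorization itself varies between LAPACK builds, as the test's comment notes:

import numpy as np

a = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)  # shape (3, 2)

q, r = np.linalg.qr(a, mode="reduced")             # q: (3, 2), r: (2, 2)
q_full, r_full = np.linalg.qr(a, mode="complete")  # q: (3, 3), r: (3, 2)
r_only = np.linalg.qr(a, mode="r")                 # r only: (2, 2)

# mode="raw" exposes LAPACK's packed form: h holds the Householder
# reflectors in Fortran (transposed) order, tau the scaling factors.
h, tau = np.linalg.qr(a, mode="raw")               # h: (2, 3), tau: (2,)
assert np.allclose(q @ r, a)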
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_exceptions_not_invertible
def test_exceptions_not_invertible(self, dt):
    if dt in self.dtnoinv:
        return
    mat = self.noninv.astype(dt)
    assert_raises(LinAlgError, matrix_power, mat, -1)
(same module preamble as in the first row)
@skip  # FIXME: otherwise fails in setUp calling np.block
@instantiate_parametrized_tests
class TestMatrixPower(TestCase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
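Negative powers are where matrix_power needs an inverse, which is the contract this test exercises. A standalone sketch of that contract in plain NumPy:

import numpy as np
from numpy.linalg import LinAlgError, matrix_power

m = np.array([[1.0, 2.0], [3.0, 4.0]])  # invertible
assert np.allclose(matrix_power(m, -1), np.linalg.inv(m))

singular = np.ones((2, 2))  # rank 1, not invertible
try:
    matrix_power(singular, -1)
except LinAlgError:
    print("negative powers require an invertible matrix")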
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
do
def do(self, a, b, tags):
    x = linalg.solve(a, b)
    assert_almost_equal(b, dot_generalized(a, x))
    assert_(consistent_subclass(x, b))
(same module preamble as in the first row)
class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
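The do case above is the generic solve check; dot_generalized and consistent_subclass are helpers defined elsewhere in the test file. The same invariant as a standalone plain-NumPy sketch:

import numpy as np

a = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])

x = np.linalg.solve(a, b)     # solves a @ x = b
assert np.allclose(a @ x, b)  # the round trip the test asserts
print(x)                      # [2. 3.]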
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_invalid
def test_invalid(self):
    x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
    assert_raises((RuntimeError, ValueError), np.linalg.eigvalsh, x, UPLO="lrong")
    assert_raises((RuntimeError, ValueError), np.linalg.eigvalsh, x, "lower")
    assert_raises((RuntimeError, ValueError), np.linalg.eigvalsh, x, "upper")
(same module preamble as in the first row)
@instantiate_parametrized_tests
class TestEigvalsh(TestCase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_UPLO
def test_UPLO(self):
    Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
    Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
    tgt = np.array([-1, 1], dtype=np.double)
    rtol = get_rtol(np.double)

    # Check default is 'L'
    w = np.linalg.eigvalsh(Klo)
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'L'
    w = np.linalg.eigvalsh(Klo, UPLO="L")
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'l'
    w = np.linalg.eigvalsh(Klo, UPLO="l")
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'U'
    w = np.linalg.eigvalsh(Kup, UPLO="U")
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'u'
    w = np.linalg.eigvalsh(Kup, UPLO="u")
    assert_allclose(w, tgt, rtol=rtol)
(same module preamble as in the first row)
@instantiate_parametrized_tests
class TestEigvalsh(TestCase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
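The reason Klo and Kup produce the same spectrum is that eigvalsh reads only the triangle named by UPLO and assumes symmetry for the rest. A plain-NumPy sketch:

import numpy as np

Klo = np.array([[0.0, 0.0], [1.0, 0.0]])  # data only in the lower triangle
Kup = np.array([[0.0, 1.0], [0.0, 0.0]])  # data only in the upper triangle

# Both are treated as the symmetric [[0, 1], [1, 0]],
# whose eigenvalues are -1 and 1.
print(np.linalg.eigvalsh(Klo, UPLO="L"))  # [-1.  1.]
print(np.linalg.eigvalsh(Kup, UPLO="U"))  # [-1.  1.]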
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_empty_a_b
def test_empty_a_b(self, m, n, n_rhs):
    a = np.arange(m * n).reshape(m, n)
    b = np.ones((m, n_rhs))
    x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
    if m == 0:
        assert_((x == 0).all())
    assert_equal(x.shape, (n, n_rhs))
    assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
    if m > n and n_rhs > 0:
        # residuals are exactly the squared norms of b's columns
        r = b - np.dot(a, x)
        assert_almost_equal(residuals, (r * r).sum(axis=-2))
    assert_equal(rank, min(m, n))
    assert_equal(s.shape, (min(m, n),))
(same module preamble as in the first row)
@instantiate_parametrized_tests
class TestLstsq(LstsqCases, TestCase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
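The residuals assertion above relies on lstsq's contract: for an overdetermined, full-column-rank system, residuals holds the squared 2-norm of b - a @ x per right-hand side. A plain-NumPy sketch on random full-rank data:

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(5, 3))  # m=5 > n=3, full column rank almost surely
b = rng.normal(size=(5, 2))  # two right-hand sides

x, residuals, rank, s = np.linalg.lstsq(a, b, rcond=None)
r = b - a @ x
assert np.allclose(residuals, (r * r).sum(axis=0))  # squared column norms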
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_incompatible_dims
def test_incompatible_dims(self):
    # use modified version of docstring example
    x = np.array([0, 1, 2, 3])
    y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
    A = np.vstack([x, np.ones(len(x))]).T
    # with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
    with assert_raises((RuntimeError, LinAlgError)):
        linalg.lstsq(A, y, rcond=None)

# @xfail  # (reason="no block()")
(same module preamble as in the first row)
@instantiate_parametrized_tests
class TestLstsq(LstsqCases, TestCase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
tz
def tz(M):
    mz = matrix_power(M, 0)
    assert_equal(mz, identity_like_generalized(M))
    assert_equal(mz.dtype, M.dtype)

for mat in self.rshft_all:
    tz(mat.astype(dt))
    if dt != object:
        tz(self.stacked.astype(dt))
(same module preamble as in the first row)
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
check_dtype
def check_dtype(x, res):
    if issubclass(x.dtype.type, np.inexact):
        assert_equal(res.dtype, x.real.dtype)
    else:
        # For integer input, don't have to test float precision of output.
        assert_(issubclass(res.dtype.type, np.floating))
(same module preamble as in the first row)
class _TestNormBase:
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
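check_dtype encodes the rule that norms of inexact inputs keep the input's real dtype, while integer inputs promote to some floating type. A short plain-NumPy illustration:

import numpy as np

z = np.array([3 + 4j], dtype=np.complex128)
n = np.linalg.norm(z)
print(n, n.dtype)  # 5.0 float64, i.e. z.real.dtype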
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_empty
def test_empty(self):
    assert_equal(norm([]), 0.0)
    assert_equal(norm(array([], dtype=self.dt)), 0.0)
    assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
(same module preamble as in the first row)
class _TestNormGeneral(_TestNormBase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_vector_return_type
def test_vector_return_type(self):
    a = np.array([1, 0, 1])

    exact_types = "Bbhil"  # np.typecodes["AllInteger"]
    inexact_types = "efdFD"  # np.typecodes["AllFloat"]

    all_types = exact_types + inexact_types

    for each_type in all_types:
        at = a.astype(each_type)
        if each_type == np.dtype("float16"):
            # FIXME: move looping to parametrize, add decorators=[xfail]
            # pytest.xfail("float16**float64 => float64 (?)")
            raise SkipTest("float16**float64 => float64 (?)")

        an = norm(at, -np.inf)
        self.check_dtype(at, an)
        assert_almost_equal(an, 0.0)

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "divide by zero encountered")
            an = norm(at, -1)
            self.check_dtype(at, an)
            assert_almost_equal(an, 0.0)

        an = norm(at, 0)
        self.check_dtype(at, an)
        assert_almost_equal(an, 2)

        an = norm(at, 1)
        self.check_dtype(at, an)
        assert_almost_equal(an, 2.0)

        an = norm(at, 2)
        self.check_dtype(at, an)
        assert_almost_equal(an, an.dtype.type(2.0) ** an.dtype.type(1.0 / 2.0))

        an = norm(at, 4)
        self.check_dtype(at, an)
        assert_almost_equal(an, an.dtype.type(2.0) ** an.dtype.type(1.0 / 4.0))

        an = norm(at, np.inf)
        self.check_dtype(at, an)
        assert_almost_equal(an, 1.0)
(same module preamble as in the first row)
class _TestNormGeneral(_TestNormBase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_vector
def test_vector(self):
    a = [1, 2, 3, 4]
    b = [-1, -2, -3, -4]
    c = [-1, 2, -3, 4]

    def _test(v):
        np.testing.assert_almost_equal(norm(v), 30**0.5, decimal=self.dec)
        np.testing.assert_almost_equal(norm(v, inf), 4.0, decimal=self.dec)
        np.testing.assert_almost_equal(norm(v, -inf), 1.0, decimal=self.dec)
        np.testing.assert_almost_equal(norm(v, 1), 10.0, decimal=self.dec)
        np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, decimal=self.dec)
        np.testing.assert_almost_equal(norm(v, 2), 30**0.5, decimal=self.dec)
        np.testing.assert_almost_equal(
            norm(v, -2), ((205.0 / 144) ** -0.5), decimal=self.dec
        )
        np.testing.assert_almost_equal(norm(v, 0), 4, decimal=self.dec)

    for v in (a, b, c):
        _test(v)

    for v in (
        array(a, dtype=self.dt),
        array(b, dtype=self.dt),
        array(c, dtype=self.dt),
    ):
        _test(v)
(same module preamble as in the first row)
class _TestNormGeneral(_TestNormBase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
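The expected values inside _test all follow from the p-norm formula ||v||_p = (sum_i |v_i|**p)**(1/p), which norm extends to p <= 0. For v = [1, 2, 3, 4], a plain-NumPy check:

import numpy as np

v = np.array([1.0, 2.0, 3.0, 4.0])

print(np.linalg.norm(v, 2))        # sqrt(1 + 4 + 9 + 16) = 30**0.5
print(np.linalg.norm(v, 1))        # 1 + 2 + 3 + 4 = 10
print(np.linalg.norm(v, -1))       # (1/1 + 1/2 + 1/3 + 1/4)**-1 = 12/25
print(np.linalg.norm(v, 0))        # count of nonzero entries = 4
print(np.linalg.norm(v, np.inf))   # max |v_i| = 4
print(np.linalg.norm(v, -np.inf))  # min |v_i| = 1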
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
_test
def _test(v):
    np.testing.assert_almost_equal(norm(v), 30**0.5, decimal=self.dec)
    np.testing.assert_almost_equal(norm(v, inf), 4.0, decimal=self.dec)
    np.testing.assert_almost_equal(norm(v, -inf), 1.0, decimal=self.dec)
    np.testing.assert_almost_equal(norm(v, 1), 10.0, decimal=self.dec)
    np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, decimal=self.dec)
    np.testing.assert_almost_equal(norm(v, 2), 30**0.5, decimal=self.dec)
    np.testing.assert_almost_equal(
        norm(v, -2), ((205.0 / 144) ** -0.5), decimal=self.dec
    )
    np.testing.assert_almost_equal(norm(v, 0), 4, decimal=self.dec)

for v in (a, b, c):
    _test(v)

for v in (
    array(a, dtype=self.dt),
    array(b, dtype=self.dt),
    array(c, dtype=self.dt),
):
    _test(v)
(same module preamble as in the first row)
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_axis
def test_axis(self):
    # Vector norms.
    # Compare the use of `axis` with computing the norm of each row
    # or column separately.
    A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
    for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]:
        expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
        assert_almost_equal(norm(A, ord=order, axis=0), expected0)
        expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
        assert_almost_equal(norm(A, ord=order, axis=1), expected1)

    # Matrix norms.
    B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
    nd = B.ndim
    for order in [None, -2, 2, -1, 1, np.inf, -np.inf, "fro"]:
        for axis in itertools.combinations(range(-nd, nd), 2):
            row_axis, col_axis = axis
            if row_axis < 0:
                row_axis += nd
            if col_axis < 0:
                col_axis += nd
            if row_axis == col_axis:
                assert_raises(
                    (RuntimeError, ValueError), norm, B, ord=order, axis=axis
                )
            else:
                n = norm(B, ord=order, axis=axis)

                # The logic using k_index only works for nd = 3.
                # This has to be changed if nd is increased.
                k_index = nd - (row_axis + col_axis)
                if row_axis < col_axis:
                    expected = [
                        norm(B[:].take(k, axis=k_index), ord=order)
                        for k in range(B.shape[k_index])
                    ]
                else:
                    expected = [
                        norm(B[:].take(k, axis=k_index).T, ord=order)
                        for k in range(B.shape[k_index])
                    ]
                assert_almost_equal(n, expected)
(same module preamble as in the first row)
class _TestNormGeneral(_TestNormBase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_keepdims
def test_keepdims(self):
    A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

    allclose_err = "order {0}, axis = {1}"
    shape_err = "Shape mismatch found {0}, expected {1}, order={2}, axis={3}"

    # check the order=None, axis=None case
    expected = norm(A, ord=None, axis=None)
    found = norm(A, ord=None, axis=None, keepdims=True)
    assert_allclose(
        np.squeeze(found), expected, err_msg=allclose_err.format(None, None)
    )
    expected_shape = (1, 1, 1)
    assert_(
        found.shape == expected_shape,
        shape_err.format(found.shape, expected_shape, None, None),
    )

    # Vector norms.
    for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]:
        for k in range(A.ndim):
            expected = norm(A, ord=order, axis=k)
            found = norm(A, ord=order, axis=k, keepdims=True)
            assert_allclose(
                np.squeeze(found), expected, err_msg=allclose_err.format(order, k)
            )
            expected_shape = list(A.shape)
            expected_shape[k] = 1
            expected_shape = tuple(expected_shape)
            assert_(
                found.shape == expected_shape,
                shape_err.format(found.shape, expected_shape, order, k),
            )

    # Matrix norms.
    for order in [None, -2, 2, -1, 1, np.inf, -np.inf, "fro", "nuc"]:
        for k in itertools.permutations(range(A.ndim), 2):
            expected = norm(A, ord=order, axis=k)
            found = norm(A, ord=order, axis=k, keepdims=True)
            assert_allclose(
                np.squeeze(found), expected, err_msg=allclose_err.format(order, k)
            )
            expected_shape = list(A.shape)
            expected_shape[k[0]] = 1
            expected_shape[k[1]] = 1
            expected_shape = tuple(expected_shape)
            assert_(
                found.shape == expected_shape,
                shape_err.format(found.shape, expected_shape, order, k),
            )
(same module preamble as in the first row)
class _TestNormGeneral(_TestNormBase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_matrix_empty
def test_matrix_empty(self):
    assert_equal(norm(np.array([[]], dtype=self.dt)), 0.0)
(same module preamble as in the first row)
class _TestNorm2D(_TestNormBase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_matrix_return_type
def test_matrix_return_type(self):
    a = np.array([[1, 0, 1], [0, 1, 1]])

    exact_types = "Bbhil"  # np.typecodes["AllInteger"]
    # float32, complex64, float64, complex128 types are the only types
    # allowed by `linalg`, which performs the matrix operations used
    # within `norm`.
    inexact_types = "fdFD"

    all_types = exact_types + inexact_types

    for each_type in all_types:
        at = a.astype(each_type)

        an = norm(at, -np.inf)
        self.check_dtype(at, an)
        assert_almost_equal(an, 2.0)

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "divide by zero encountered")
            an = norm(at, -1)
            self.check_dtype(at, an)
            assert_almost_equal(an, 1.0)

        an = norm(at, 1)
        self.check_dtype(at, an)
        assert_almost_equal(an, 2.0)

        an = norm(at, 2)
        self.check_dtype(at, an)
        assert_almost_equal(an, 3.0 ** (1.0 / 2.0))

        an = norm(at, -2)
        self.check_dtype(at, an)
        assert_almost_equal(an, 1.0)

        an = norm(at, np.inf)
        self.check_dtype(at, an)
        assert_almost_equal(an, 2.0)

        an = norm(at, "fro")
        self.check_dtype(at, an)
        assert_almost_equal(an, 2.0)

        an = norm(at, "nuc")
        self.check_dtype(at, an)
        # Lower bar needed to support low precision floats.
        # They end up being off by 1 in the 7th place.
        np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
(same module preamble as in the first row)
class _TestNorm2D(_TestNormBase):
import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
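For reference, the expected values in test_matrix_return_type follow directly from the singular values of [[1, 0, 1], [0, 1, 1]]: a @ a.T = [[2, 1], [1, 2]] has eigenvalues 3 and 1, so the spectral norm is sqrt(3) and the nuclear norm is 1 + sqrt(3) ≈ 2.7320508. A minimal standalone sketch (plain numpy, outside the test harness) that re-derives them:

import numpy as np

a = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
svals = np.linalg.svd(a, compute_uv=False)
print(svals)                     # [sqrt(3), 1]
print(np.linalg.norm(a, 2))      # sqrt(3): the largest singular value
print(np.linalg.norm(a, "nuc"))  # 1 + sqrt(3) ~= 2.7320508075688772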
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_basic_function_with_two_arguments
def test_basic_function_with_two_arguments(self):
    # separate code path with two arguments
    A = np.random.random((6, 2))
    B = np.random.random((2, 6))

    assert_almost_equal(multi_dot([A, B]), A.dot(B))
    assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_basic_function_with_dynamic_programming_optimization
def test_basic_function_with_dynamic_programming_optimization(self):
    # multi_dot with four or more arguments uses the dynamic programming
    # optimization and therefore deserves a separate test
    A = np.random.random((6, 2))
    B = np.random.random((2, 6))
    C = np.random.random((6, 2))
    D = np.random.random((2, 1))
    assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
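The point of the dynamic-programming optimization exercised above is that the parenthesization changes the cost of the chain product. A small sketch under the usual cost model (an (m x k) times (k x n) product costs m*k*n scalar multiplications), for the shapes in this test:

def cost(m, k, n):
    # an (m x k) by (k x n) product costs m*k*n scalar multiplications
    return m * k * n

# Shapes from the test above: A(6x2), B(2x6), C(6x2), D(2x1).
left_to_right = cost(6, 2, 6) + cost(6, 6, 2) + cost(6, 2, 1)  # ((A B) C) D = 156
right_to_left = cost(6, 2, 1) + cost(2, 6, 1) + cost(6, 2, 1)  # A (B (C D)) = 36
print(left_to_right, right_to_left)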
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_vector_as_first_argument
def test_vector_as_first_argument(self):
    # The first argument can be 1-D
    A1d = np.random.random(2)  # 1-D
    B = np.random.random((2, 6))
    C = np.random.random((6, 2))
    D = np.random.random((2, 2))

    # the result should be 1-D
    assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_vector_as_last_argument
def test_vector_as_last_argument(self):
    # The last argument can be 1-D
    A = np.random.random((6, 2))
    B = np.random.random((2, 6))
    C = np.random.random((6, 2))
    D1d = np.random.random(2)  # 1-D

    # the result should be 1-D
    assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_vector_as_first_and_last_argument
def test_vector_as_first_and_last_argument(self):
    # The first and last arguments can be 1-D
    A1d = np.random.random(2)  # 1-D
    B = np.random.random((2, 6))
    C = np.random.random((6, 2))
    D1d = np.random.random(2)  # 1-D

    # the result should be a scalar
    assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
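Taken together, the three vector-argument tests above pin down multi_dot's shape rules: a 1-D first argument is treated as a row vector, a 1-D last argument as a column vector, and the singleton dimensions are squeezed from the result. A minimal sketch of that behaviour (plain numpy):

import numpy as np

A1d = np.random.random(2)            # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2)            # 1-D

print(np.linalg.multi_dot([A1d, B, C]).shape)       # (2,): 1-D first argument
print(np.linalg.multi_dot([B, C, D1d]).shape)       # (2,): 1-D last argument
print(np.linalg.multi_dot([A1d, B, C, D1d]).shape)  # (): 1-D at both ends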
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_three_arguments_and_out
def test_three_arguments_and_out(self):
    # multi_dot with three arguments uses a fast hand coded algorithm to
    # determine the optimal order. Therefore test it separately.
    A = np.random.random((6, 2))
    B = np.random.random((2, 6))
    C = np.random.random((6, 2))

    out = np.zeros((6, 2))
    ret = multi_dot([A, B, C], out=out)
    assert out is ret
    assert_almost_equal(out, A.dot(B).dot(C))
    assert_almost_equal(out, np.dot(A, np.dot(B, C)))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_two_arguments_and_out
def test_two_arguments_and_out(self):
    # separate code path with two arguments
    A = np.random.random((6, 2))
    B = np.random.random((2, 6))
    out = np.zeros((6, 6))
    ret = multi_dot([A, B], out=out)
    assert out is ret
    assert_almost_equal(out, A.dot(B))
    assert_almost_equal(out, np.dot(A, B))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_dynamic_programming_optimization_and_out
def test_dynamic_programming_optimization_and_out(self):
    # multi_dot with four or more arguments uses the dynamic programming
    # optimization and therefore deserves a separate test
    A = np.random.random((6, 2))
    B = np.random.random((2, 6))
    C = np.random.random((6, 2))
    D = np.random.random((2, 1))
    out = np.zeros((6, 1))
    ret = multi_dot([A, B, C, D], out=out)
    assert out is ret
    assert_almost_equal(out, A.dot(B).dot(C).dot(D))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
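Both out= tests above check the same contract: multi_dot writes the product into the preallocated array and returns that very object (object identity, not just value equality). A minimal sketch:

import numpy as np

A = np.random.random((6, 2))
B = np.random.random((2, 6))
buf = np.zeros((6, 6))

ret = np.linalg.multi_dot([A, B], out=buf)
print(ret is buf)               # True: the preallocated array is returned
print(np.allclose(buf, A @ B))  # True: the product was written in place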
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_dynamic_programming_logic
def test_dynamic_programming_logic(self):
    # Test for the dynamic programming part
    # This test is directly taken from Cormen page 376.
    arrays = [
        np.random.random((30, 35)),
        np.random.random((35, 15)),
        np.random.random((15, 5)),
        np.random.random((5, 10)),
        np.random.random((10, 20)),
        np.random.random((20, 25)),
    ]
    m_expected = np.array(
        [
            [0.0, 15750.0, 7875.0, 9375.0, 11875.0, 15125.0],
            [0.0, 0.0, 2625.0, 4375.0, 7125.0, 10500.0],
            [0.0, 0.0, 0.0, 750.0, 2500.0, 5375.0],
            [0.0, 0.0, 0.0, 0.0, 1000.0, 3500.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 5000.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ]
    )
    s_expected = np.array(
        [
            [0, 1, 1, 3, 3, 3],
            [0, 0, 2, 3, 3, 3],
            [0, 0, 0, 3, 3, 3],
            [0, 0, 0, 0, 4, 5],
            [0, 0, 0, 0, 0, 5],
            [0, 0, 0, 0, 0, 0],
        ],
        dtype=int,
    )
    s_expected -= 1  # Cormen uses 1-based index, python does not.

    s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)

    # Only the upper triangular part (without the diagonal) is interesting.
    assert_almost_equal(np.triu(s[:-1, 1:]), np.triu(s_expected[:-1, 1:]))
    assert_almost_equal(np.triu(m), np.triu(m_expected))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
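The value m_expected[0, 5] = 15125 above is the optimal scalar-multiplication count from Cormen's example. A standalone sketch of the textbook recurrence (a hypothetical re-implementation for illustration, not the private _multi_dot_matrix_chain_order helper) that reproduces it:

import numpy as np

# Chain dimensions for (30x35)(35x15)(15x5)(5x10)(10x20)(20x25):
# matrix i has shape p[i] x p[i + 1].
p = [30, 35, 15, 5, 10, 20, 25]
n = len(p) - 1
m = np.zeros((n, n))
for length in range(2, n + 1):          # subchain length
    for i in range(n - length + 1):
        j = i + length - 1
        # best split point k for the product of matrices i..j
        m[i, j] = min(
            m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1]
            for k in range(i, j)
        )
print(m[0, n - 1])  # 15125.0, matching m_expected[0, 5] in the test above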
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_too_few_input_arrays
def test_too_few_input_arrays(self):
    assert_raises((RuntimeError, ValueError), multi_dot, [])
    assert_raises((RuntimeError, ValueError), multi_dot, [np.random.random((3, 3))])
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_empty_herm_cases
def test_empty_herm_cases(self):
    self.check_cases(require={"hermitian", "size-0"}, exclude={"generalized"})
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] class HermitianTestCase(LinalgTestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_generalized_empty_sq_cases
def test_generalized_empty_sq_cases(self):
    self.check_cases(require={"generalized", "square", "size-0"})
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] class LinalgGeneralizedSquareTestCase(LinalgTestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_generalized_empty_nonsq_cases
def test_generalized_empty_nonsq_cases(self):
    self.check_cases(require={"generalized", "nonsquare", "size-0"})
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_generalized_empty_herm_cases
def test_generalized_empty_herm_cases(self):
    self.check_cases(
        require={"generalized", "hermitian", "size-0"}, exclude={"none"}
    )
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] class HermitianGeneralizedTestCase(LinalgTestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
dot_generalized
def dot_generalized(a, b):
    a = asarray(a)
    if a.ndim >= 3:
        if a.ndim == b.ndim:
            # matrix x matrix
            new_shape = a.shape[:-1] + b.shape[-1:]
        elif a.ndim == b.ndim + 1:
            # matrix x vector
            new_shape = a.shape[:-1]
        else:
            raise ValueError("Not implemented...")
        r = np.empty(new_shape, dtype=np.common_type(a, b))
        for c in itertools.product(*map(range, a.shape[:-2])):
            r[c] = dot(a[c], b[c])
        return r
    else:
        return dot(a, b)
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
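dot_generalized above emulates batched matrix multiplication by looping over all leading axes. A minimal usage sketch (assuming dot_generalized as defined in this record):

a = np.random.random((2, 4, 3, 3))   # a 2x4 stack of 3x3 matrices
b = np.random.random((2, 4, 3, 3))

r = dot_generalized(a, b)
# r[i, j] == dot(a[i, j], b[i, j]) for every stack index, which for this
# matrix-times-matrix case coincides with np.matmul(a, b)
assert np.allclose(r, np.matmul(a, b))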
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
identity_like_generalized
def identity_like_generalized(a):
    a = asarray(a)
    if a.ndim >= 3:
        r = np.empty(a.shape, dtype=a.dtype)
        r[...] = identity(a.shape[-2])
        return r
    else:
        return identity(a.shape[0])


class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    # kept apart from TestSolve for use for testing with matrices.
    def do(self, a, b, tags):
        x = linalg.solve(a, b)
        assert_almost_equal(b, dot_generalized(a, x))
        assert_(consistent_subclass(x, b))


@instantiate_parametrized_tests
class TestSolve(SolveCases, TestCase):
    @parametrize("dtype", [single, double, csingle, cdouble])
    def test_types(self, dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        assert_equal(linalg.solve(x, x).dtype, dtype)

    @skip(reason="subclass")
    def test_0_size(self):
        class ArraySubclass(np.ndarray):
            pass

        # Test system of 0x0 matrices
        a = np.arange(8).reshape(2, 2, 2)
        b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)

        expected = linalg.solve(a, b)[:, 0:0, :]
        result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))

        # Test errors for non-square and only b's dimension being 0
        assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
        assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])

        # Test broadcasting error
        b = np.arange(6).reshape(1, 3, 2)  # broadcasting error
        assert_raises(ValueError, linalg.solve, a, b)
        assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])

        # Test zero "single equations" with 0x0 matrices.
        b = np.arange(2).reshape(1, 2).view(ArraySubclass)
        expected = linalg.solve(a, b)[:, 0:0]
        result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))

        b = np.arange(3).reshape(1, 3)
        assert_raises(ValueError, linalg.solve, a, b)
        assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
        assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)

    @skip(reason="subclass")
    def test_0_size_k(self):
        # test zero multiple equation (K=0) case.
        class ArraySubclass(np.ndarray):
            pass

        a = np.arange(4).reshape(1, 2, 2)
        b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)

        expected = linalg.solve(a, b)[:, :, 0:0]
        result = linalg.solve(a, b[:, :, 0:0])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))

        # test both zero.
        expected = linalg.solve(a, b)[:, 0:0, 0:0]
        result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))


class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    def do(self, a, b, tags):
        a_inv = linalg.inv(a)
        assert_almost_equal(dot_generalized(a, a_inv), identity_like_generalized(a))
        assert_(consistent_subclass(a_inv, a))


@instantiate_parametrized_tests
class TestInv(InvCases, TestCase):
    @parametrize("dtype", [single, double, csingle, cdouble])
    def test_types(self, dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        assert_equal(linalg.inv(x).dtype, dtype)

    @skip(reason="subclass")
    def test_0_size(self):
        # Check that all kinds of 0-sized arrays work
        class ArraySubclass(np.ndarray):
            pass

        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res = linalg.inv(a)
        assert_(res.dtype.type is np.float64)
        assert_equal(a.shape, res.shape)
        assert_(isinstance(res, ArraySubclass))

        a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
        res = linalg.inv(a)
        assert_(res.dtype.type is np.complex64)
        assert_equal(a.shape, res.shape)
        assert_(isinstance(res, ArraySubclass))


class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    def do(self, a, b, tags):
        ev = linalg.eigvals(a)
        evalues, evectors = linalg.eig(a)
        assert_almost_equal(ev, evalues)


@instantiate_parametrized_tests
class TestEigvals(EigvalsCases, TestCase):
    @parametrize("dtype", [single, double, csingle, cdouble])
    def test_types(self, dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        assert_equal(linalg.eigvals(x).dtype, dtype)
        x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
        assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))

    @skip(reason="subclass")
    def test_0_size(self):
        # Check that all kinds of 0-sized arrays work
        class ArraySubclass(np.ndarray):
            pass

        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res = linalg.eigvals(a)
        assert_(res.dtype.type is np.float64)
        assert_equal((0, 1), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))

        a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
        res = linalg.eigvals(a)
        assert_(res.dtype.type is np.complex64)
        assert_equal((0,), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))


class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    def do(self, a, b, tags):
        evalues, evectors = linalg.eig(a)
        assert_allclose(
            dot_generalized(a, evectors),
            np.asarray(evectors) * np.asarray(evalues)[..., None, :],
            rtol=get_rtol(evalues.dtype),
        )
        assert_(consistent_subclass(evectors, a))


@instantiate_parametrized_tests
class TestEig(EigCases, TestCase):
    @parametrize("dtype", [single, double, csingle, cdouble])
    def test_types(self, dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        w, v = np.linalg.eig(x)
        assert_equal(w.dtype, dtype)
        assert_equal(v.dtype, dtype)

        x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
        w, v = np.linalg.eig(x)
        assert_equal(w.dtype, get_complex_dtype(dtype))
        assert_equal(v.dtype, get_complex_dtype(dtype))

    @skip(reason="subclass")
    def test_0_size(self):
        # Check that all kinds of 0-sized arrays work
        class ArraySubclass(np.ndarray):
            pass

        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res, res_v = linalg.eig(a)
        assert_(res_v.dtype.type is np.float64)
        assert_(res.dtype.type is np.float64)
        assert_equal(a.shape, res_v.shape)
        assert_equal((0, 1), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(a, np.ndarray))

        a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
        res, res_v = linalg.eig(a)
        assert_(res_v.dtype.type is np.complex64)
        assert_(res.dtype.type is np.complex64)
        assert_equal(a.shape, res_v.shape)
        assert_equal((0,), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(a, np.ndarray))


@instantiate_parametrized_tests
class SVDBaseTests:
    hermitian = False

    @parametrize("dtype", [single, double, csingle, cdouble])
    def test_types(self, dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        u, s, vh = linalg.svd(x)
        assert_equal(u.dtype, dtype)
        assert_equal(s.dtype, get_real_dtype(dtype))
        assert_equal(vh.dtype, dtype)
        s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
        assert_equal(s.dtype, get_real_dtype(dtype))


class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    def do(self, a, b, tags):
        u, s, vt = linalg.svd(a, False)
        assert_allclose(
            a,
            dot_generalized(
                np.asarray(u) * np.asarray(s)[..., None, :], np.asarray(vt)
            ),
            rtol=get_rtol(u.dtype),
        )
        assert_(consistent_subclass(u, a))
        assert_(consistent_subclass(vt, a))


class TestSVD(SVDCases, SVDBaseTests, TestCase):
    def test_empty_identity(self):
        """Empty input should put an identity matrix in u or vh"""
        x = np.empty((4, 0))
        u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
        assert_equal(u.shape, (4, 4))
        assert_equal(vh.shape, (0, 0))
        assert_equal(u, np.eye(4))

        x = np.empty((0, 4))
        u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
        assert_equal(u.shape, (0, 0))
        assert_equal(vh.shape, (4, 4))
        assert_equal(vh, np.eye(4))


class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
    def do(self, a, b, tags):
        u, s, vt = linalg.svd(a, False, hermitian=True)
        assert_allclose(
            a,
            dot_generalized(
                np.asarray(u) * np.asarray(s)[..., None, :], np.asarray(vt)
            ),
            rtol=get_rtol(u.dtype),
        )

        def hermitian(mat):
            axes = list(range(mat.ndim))
            axes[-1], axes[-2] = axes[-2], axes[-1]
            return np.conj(np.transpose(mat, axes=axes))

        assert_almost_equal(
            np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)
        )
        assert_almost_equal(
            np.matmul(vt, hermitian(vt)),
            np.broadcast_to(np.eye(vt.shape[-1]), vt.shape),
        )
        assert_equal(np.sort(s), np.flip(s, -1))
        assert_(consistent_subclass(u, a))
        assert_(consistent_subclass(vt, a))


class TestSVDHermitian(SVDHermitianCases, SVDBaseTests, TestCase):
    hermitian = True


class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    # cond(x, p) for p in (None, 2, -2)

    def do(self, a, b, tags):
        c = asarray(a)  # a might be a matrix
        if "size-0" in tags:
            assert_raises(LinAlgError, linalg.cond, c)
            return

        # +-2 norms
        s = linalg.svd(c, compute_uv=False)
        assert_almost_equal(
            linalg.cond(a), s[..., 0] / s[..., -1], single_decimal=5, double_decimal=11
        )
        assert_almost_equal(
            linalg.cond(a, 2),
            s[..., 0] / s[..., -1],
            single_decimal=5,
            double_decimal=11,
        )
        assert_almost_equal(
            linalg.cond(a, -2),
            s[..., -1] / s[..., 0],
            single_decimal=5,
            double_decimal=11,
        )

        # Other norms
        cinv = np.linalg.inv(c)
        assert_almost_equal(
            linalg.cond(a, 1),
            abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),
            single_decimal=5,
            double_decimal=11,
        )
        assert_almost_equal(
            linalg.cond(a, -1),
            abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),
            single_decimal=5,
            double_decimal=11,
        )
        assert_almost_equal(
            linalg.cond(a, np.inf),
            abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),
            single_decimal=5,
            double_decimal=11,
        )
        assert_almost_equal(
            linalg.cond(a, -np.inf),
            abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),
            single_decimal=5,
            double_decimal=11,
        )
        assert_almost_equal(
            linalg.cond(a, "fro"),
            np.sqrt((abs(c) ** 2).sum(-1).sum(-1) * (abs(cinv) ** 2).sum(-1).sum(-1)),
            single_decimal=5,
            double_decimal=11,
        )


class TestCond(CondCases, TestCase):
    def test_basic_nonsvd(self):
        # Smoketest the non-svd norms
        A = array([[1.0, 0, 1], [0, -2.0, 0], [0, 0, 3.0]])
        assert_almost_equal(linalg.cond(A, inf), 4)
        assert_almost_equal(linalg.cond(A, -inf), 2 / 3)
        assert_almost_equal(linalg.cond(A, 1), 4)
        assert_almost_equal(linalg.cond(A, -1), 0.5)
        assert_almost_equal(linalg.cond(A, "fro"), np.sqrt(265 / 12))

    def test_singular(self):
        # Singular matrices have infinite condition number for
        # positive norms, and negative norms shouldn't raise
        # exceptions
        As = [np.zeros((2, 2)), np.ones((2, 2))]
        p_pos = [None, 1, 2, "fro"]
        p_neg = [-1, -2]
        for A, p in itertools.product(As, p_pos):
            # Inversion may not hit exact infinity, so just check the
            # number is large
            assert_(linalg.cond(A, p) > 1e15)
        for A, p in itertools.product(As, p_neg):
            linalg.cond(A, p)

    @skip(reason="NP_VER: fails on CI")
    # (
    #    True, run=False, reason="Platform/LAPACK-dependent failure, see gh-18914"
    # )
    def test_nan(self):
        # nans should be passed through, not converted to infs
        ps = [None, 1, -1, 2, -2, "fro"]
        p_pos = [None, 1, 2, "fro"]

        A = np.ones((2, 2))
        A[0, 1] = np.nan
        for p in ps:
            c = linalg.cond(A, p)
            assert_(isinstance(c, np.float64))
            assert_(np.isnan(c))

        A = np.ones((3, 2, 2))
        A[1, 0, 1] = np.nan
        for p in ps:
            c = linalg.cond(A, p)
            assert_(np.isnan(c[1]))
            if p in p_pos:
                assert_(c[0] > 1e15)
                assert_(c[2] > 1e15)
            else:
                assert_(not np.isnan(c[0]))
                assert_(not np.isnan(c[2]))

    def test_stacked_singular(self):
        # Check behavior when only some of the stacked matrices are
        # singular
        np.random.seed(1234)
        A = np.random.rand(2, 2, 2, 2)
        A[0, 0] = 0
        A[1, 1] = 0

        for p in (None, 1, 2, "fro", -1, -2):
            c = linalg.cond(A, p)
            assert_equal(c[0, 0], np.inf)
            assert_equal(c[1, 1], np.inf)
            assert_(np.isfinite(c[0, 1]))
            assert_(np.isfinite(c[1, 0]))


class PinvCases(
    LinalgSquareTestCase,
    LinalgNonsquareTestCase,
    LinalgGeneralizedSquareTestCase,
    LinalgGeneralizedNonsquareTestCase,
):
    def do(self, a, b, tags):
        a_ginv = linalg.pinv(a)
        # `a @ a_ginv == I` does not hold if a is singular
        dot = dot_generalized
        assert_almost_equal(
            dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11
        )
        assert_(consistent_subclass(a_ginv, a))


class TestPinv(PinvCases, TestCase):
    pass


class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
    def do(self, a, b, tags):
        a_ginv = linalg.pinv(a, hermitian=True)
        # `a @ a_ginv == I` does not hold if a is singular
        dot = dot_generalized
        assert_almost_equal(
            dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11
        )
        assert_(consistent_subclass(a_ginv, a))


class TestPinvHermitian(PinvHermitianCases, TestCase):
    pass


class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    def do(self, a, b, tags):
        d = linalg.det(a)
        (s, ld) = linalg.slogdet(a)
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        assert_almost_equal(d, np.prod(ev, axis=-1))
        assert_almost_equal(s * np.exp(ld), np.prod(ev, axis=-1), single_decimal=5)

        s = np.atleast_1d(s)
        ld = np.atleast_1d(ld)
        m = s != 0
        assert_almost_equal(np.abs(s[m]), 1)
        assert_equal(ld[~m], -inf)


@instantiate_parametrized_tests
class TestDet(DetCases, TestCase):
    def test_zero(self):
        # NB: comment out tests of type(det) == double : we return zero-dim arrays
        assert_equal(linalg.det([[0.0]]), 0.0)
        # assert_equal(type(linalg.det([[0.0]])), double)
        assert_equal(linalg.det([[0.0j]]), 0.0)
        # assert_equal(type(linalg.det([[0.0j]])), cdouble)

        assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
        # assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
        # assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
        assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
        # assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
        # assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)

    @parametrize("dtype", [single, double, csingle, cdouble])
    def test_types(self, dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        assert_equal(np.linalg.det(x).dtype, dtype)
        ph, s = np.linalg.slogdet(x)
        assert_equal(s.dtype, get_real_dtype(dtype))
        assert_equal(ph.dtype, dtype)

    def test_0_size(self):
        a = np.zeros((0, 0), dtype=np.complex64)
        res = linalg.det(a)
        assert_equal(res, 1.0)
        assert_(res.dtype.type is np.complex64)
        res = linalg.slogdet(a)
        assert_equal(res, (1, 0))
        assert_(res[0].dtype.type is np.complex64)
        assert_(res[1].dtype.type is np.float32)

        a = np.zeros((0, 0), dtype=np.float64)
        res = linalg.det(a)
        assert_equal(res, 1.0)
        assert_(res.dtype.type is np.float64)
        res = linalg.slogdet(a)
        assert_equal(res, (1, 0))
        assert_(res[0].dtype.type is np.float64)
        assert_(res[1].dtype.type is np.float64)


class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
    def do(self, a, b, tags):
        arr = np.asarray(a)
        m, n = arr.shape
        u, s, vt = linalg.svd(a, False)
        x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
        if m == 0:
            assert_((x == 0).all())
        if m <= n:
            assert_almost_equal(b, dot(a, x), single_decimal=5)
            assert_equal(rank, m)
        else:
            assert_equal(rank, n)
        # assert_almost_equal(sv, sv.__array_wrap__(s))
        if rank == n and m > n:
            expect_resids = (np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
            expect_resids = np.asarray(expect_resids)
            if np.asarray(b).ndim == 1:
                expect_resids = expect_resids.reshape(1,)
                assert_equal(residuals.shape, expect_resids.shape)
        else:
            expect_resids = np.array([])  # .view(type(x))
        assert_almost_equal(residuals, expect_resids, single_decimal=5)
        assert_(np.issubdtype(residuals.dtype, np.floating))
        assert_(consistent_subclass(x, b))
        assert_(consistent_subclass(residuals, b))


@instantiate_parametrized_tests
class TestLstsq(LstsqCases, TestCase):
    @xpassIfTorchDynamo  # (reason="Lstsq: we use the future default =None")
    def test_future_rcond(self):
        a = np.array(
            [
                [0.0, 1.0, 0.0, 1.0, 2.0, 0.0],
                [0.0, 2.0, 0.0, 0.0, 1.0, 0.0],
                [1.0, 0.0, 1.0, 0.0, 0.0, 4.0],
                [0.0, 0.0, 0.0, 2.0, 3.0, 0.0],
            ]
        ).T
        b = np.array([1, 0, 0, 0, 0, 0])
        with suppress_warnings() as sup:
            w = sup.record(FutureWarning, "`rcond` parameter will change")
            x, residuals, rank, s = linalg.lstsq(a, b)
            assert_(rank == 4)
            x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
            assert_(rank == 4)
            x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
            assert_(rank == 3)
            # Warning should be raised exactly once (first command)
            assert_(len(w) == 1)

    @parametrize(
        "m, n, n_rhs",
        [
            (4, 2, 2),
            (0, 4, 1),
            (0, 4, 2),
            (4, 0, 1),
            (4, 0, 2),
            # (4, 2, 0),  # Intel MKL ERROR: Parameter 4 was incorrect on entry to DLALSD.
            (0, 0, 0),
        ],
    )
    def test_empty_a_b(self, m, n, n_rhs):
        a = np.arange(m * n).reshape(m, n)
        b = np.ones((m, n_rhs))
        x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
        if m == 0:
            assert_((x == 0).all())
        assert_equal(x.shape, (n, n_rhs))
        assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
        if m > n and n_rhs > 0:
            # residuals are exactly the squared norms of b's columns
            r = b - np.dot(a, x)
            assert_almost_equal(residuals, (r * r).sum(axis=-2))
        assert_equal(rank, min(m, n))
        assert_equal(s.shape, (min(m, n),))

    def test_incompatible_dims(self):
        # use modified version of docstring example
        x = np.array([0, 1, 2, 3])
        y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
        A = np.vstack([x, np.ones(len(x))]).T
        # with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
        with assert_raises((RuntimeError, LinAlgError)):
            linalg.lstsq(A, y, rcond=None)


# @xfail  # (reason="no block()")
@skip  # FIXME: otherwise fails in setUp calling np.block
@instantiate_parametrized_tests
class TestMatrixPower(TestCase):
    def setUp(self):
        self.rshft_0 = np.eye(4)
        self.rshft_1 = self.rshft_0[[3, 0, 1, 2]]
        self.rshft_2 = self.rshft_0[[2, 3, 0, 1]]
        self.rshft_3 = self.rshft_0[[1, 2, 3, 0]]
        self.rshft_all = [self.rshft_0, self.rshft_1, self.rshft_2, self.rshft_3]
        self.noninv = array([[1, 0], [0, 0]])
        self.stacked = np.block([[[self.rshft_0]]] * 2)
        # FIXME the 'e' dtype might work in future
        self.dtnoinv = [object, np.dtype("e"), np.dtype("g"), np.dtype("G")]

    @parametrize("dt", [np.dtype(c) for c in "?bBhilefdFD"])
    def test_large_power(self, dt):
        rshft = self.rshft_1.astype(dt)
        assert_equal(matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
        assert_equal(matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)
        assert_equal(matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)
        assert_equal(matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)

    @parametrize("dt", [np.dtype(c) for c in "?bBhilefdFD"])
    def test_power_is_zero(self, dt):
        def tz(M):
            mz = matrix_power(M, 0)
            assert_equal(mz, identity_like_generalized(M))
            assert_equal(mz.dtype, M.dtype)

        for mat in self.rshft_all:
            tz(mat.astype(dt))
            if dt != object:
                tz(self.stacked.astype(dt))

    @parametrize("dt", [np.dtype(c) for c in "?bBhilefdFD"])
    def test_power_is_one(self, dt):
        def tz(mat):
            mz = matrix_power(mat, 1)
            assert_equal(mz, mat)
            assert_equal(mz.dtype, mat.dtype)

        for mat in self.rshft_all:
            tz(mat.astype(dt))
            if dt != object:
                tz(self.stacked.astype(dt))

    @parametrize("dt", [np.dtype(c) for c in "?bBhilefdFD"])
    def test_power_is_two(self, dt):
        def tz(mat):
            mz = matrix_power(mat, 2)
            mmul = matmul if mat.dtype != object else dot
            assert_equal(mz, mmul(mat, mat))
            assert_equal(mz.dtype, mat.dtype)

        for mat in self.rshft_all:
            tz(mat.astype(dt))
            if dt != object:
                tz(self.stacked.astype(dt))

    @parametrize("dt", [np.dtype(c) for c in "?bBhilefdFD"])
    def test_power_is_minus_one(self, dt):
        def tz(mat):
            invmat = matrix_power(mat, -1)
            mmul = matmul if mat.dtype != object else dot
            assert_almost_equal(mmul(invmat, mat), identity_like_generalized(mat))

        for mat in self.rshft_all:
            if dt not in self.dtnoinv:
                tz(mat.astype(dt))

    @parametrize("dt", [np.dtype(c) for c in "?bBhilefdFD"])
    def test_exceptions_bad_power(self, dt):
        mat = self.rshft_0.astype(dt)
        assert_raises(TypeError, matrix_power, mat, 1.5)
        assert_raises(TypeError, matrix_power, mat, [1])

    @parametrize("dt", [np.dtype(c) for c in "?bBhilefdFD"])
    def test_exceptions_non_square(self, dt):
        assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
        assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
        assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)

    @skipif(IS_WASM, reason="fp errors don't work in wasm")
    @parametrize("dt", [np.dtype(c) for c in "?bBhilefdFD"])
    def test_exceptions_not_invertible(self, dt):
        if dt in self.dtnoinv:
            return
        mat = self.noninv.astype(dt)
        assert_raises(LinAlgError, matrix_power, mat, -1)


class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
    def do(self, a, b, tags):
        pytest.xfail(reason="sort complex")
        # note that eigenvalue arrays returned by eig must be sorted since
        # their order isn't guaranteed.
        ev = linalg.eigvalsh(a, "L")
        evalues, evectors = linalg.eig(a)
        evalues.sort(axis=-1)
        assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))

        ev2 = linalg.eigvalsh(a, "U")
        assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))


@instantiate_parametrized_tests
class TestEigvalsh(TestCase):
    @parametrize("dtype", [single, double, csingle, cdouble])
    def test_types(self, dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        w = np.linalg.eigvalsh(x)
        assert_equal(w.dtype, get_real_dtype(dtype))

    def test_invalid(self):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
        assert_raises((RuntimeError, ValueError), np.linalg.eigvalsh, x, UPLO="lrong")
        assert_raises((RuntimeError, ValueError), np.linalg.eigvalsh, x, "lower")
        assert_raises((RuntimeError, ValueError), np.linalg.eigvalsh, x, "upper")

    def test_UPLO(self):
        Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
        Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
        tgt = np.array([-1, 1], dtype=np.double)
        rtol = get_rtol(np.double)

        # Check default is 'L'
        w = np.linalg.eigvalsh(Klo)
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'L'
        w = np.linalg.eigvalsh(Klo, UPLO="L")
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'l'
        w = np.linalg.eigvalsh(Klo, UPLO="l")
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'U'
        w = np.linalg.eigvalsh(Kup, UPLO="U")
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'u'
        w = np.linalg.eigvalsh(Kup, UPLO="u")
        assert_allclose(w, tgt, rtol=rtol)

    def test_0_size(self):
        # Check that all kinds of 0-sized arrays work
        # class ArraySubclass(np.ndarray):
        #     pass
        a = np.zeros((0, 1, 1), dtype=np.int_)  # .view(ArraySubclass)
        res = linalg.eigvalsh(a)
        assert_(res.dtype.type is np.float64)
        assert_equal((0, 1), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))

        a = np.zeros((0, 0), dtype=np.complex64)  # .view(ArraySubclass)
        res = linalg.eigvalsh(a)
        assert_(res.dtype.type is np.float32)
        assert_equal((0,), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))


class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
    def do(self, a, b, tags):
        pytest.xfail(reason="sort complex")
        # note that eigenvalue arrays returned by eig must be sorted since
        # their order isn't guaranteed.
        ev, evc = linalg.eigh(a)
        evalues, evectors = linalg.eig(a)
        evalues.sort(axis=-1)
        assert_almost_equal(ev, evalues)
        assert_allclose(
            dot_generalized(a, evc),
            np.asarray(ev)[..., None, :] * np.asarray(evc),
            rtol=get_rtol(ev.dtype),
        )

        ev2, evc2 = linalg.eigh(a, "U")
        assert_almost_equal(ev2, evalues)
        assert_allclose(
            dot_generalized(a, evc2),
            np.asarray(ev2)[..., None, :] * np.asarray(evc2),
            rtol=get_rtol(ev.dtype),
            err_msg=repr(a),
        )


@instantiate_parametrized_tests
class TestEigh(TestCase):
    @parametrize("dtype", [single, double, csingle, cdouble])
    def test_types(self, dtype):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
        w, v = np.linalg.eigh(x)
        assert_equal(w.dtype, get_real_dtype(dtype))
        assert_equal(v.dtype, dtype)

    def test_invalid(self):
        x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
        assert_raises((RuntimeError, ValueError), np.linalg.eigh, x, UPLO="lrong")
        assert_raises((RuntimeError, ValueError), np.linalg.eigh, x, "lower")
        assert_raises((RuntimeError, ValueError), np.linalg.eigh, x, "upper")

    def test_UPLO(self):
        Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
        Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
        tgt = np.array([-1, 1], dtype=np.double)
        rtol = get_rtol(np.double)

        # Check default is 'L'
        w, v = np.linalg.eigh(Klo)
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'L'
        w, v = np.linalg.eigh(Klo, UPLO="L")
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'l'
        w, v = np.linalg.eigh(Klo, UPLO="l")
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'U'
        w, v = np.linalg.eigh(Kup, UPLO="U")
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'u'
        w, v = np.linalg.eigh(Kup, UPLO="u")
        assert_allclose(w, tgt, rtol=rtol)

    def test_0_size(self):
        # Check that all kinds of 0-sized arrays work
        # class ArraySubclass(np.ndarray):
        #     pass
        a = np.zeros((0, 1, 1), dtype=np.int_)  # .view(ArraySubclass)
        res, res_v = linalg.eigh(a)
        assert_(res_v.dtype.type is np.float64)
        assert_(res.dtype.type is np.float64)
        assert_equal(a.shape, res_v.shape)
        assert_equal((0, 1), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(a, np.ndarray))

        a = np.zeros((0, 0), dtype=np.complex64)  # .view(ArraySubclass)
        res, res_v = linalg.eigh(a)
        assert_(res_v.dtype.type is np.complex64)
        assert_(res.dtype.type is np.float32)
        assert_equal(a.shape, res_v.shape)
        assert_equal((0,), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(a, np.ndarray))


class _TestNormBase:
    dt = None
    dec = None

    @staticmethod
    def check_dtype(x, res):
        if issubclass(x.dtype.type, np.inexact):
            assert_equal(res.dtype, x.real.dtype)
        else:
            # For integer input, don't have to test float precision of output.
            assert_(issubclass(res.dtype.type, np.floating))


class _TestNormGeneral(_TestNormBase):
    def test_empty(self):
        assert_equal(norm([]), 0.0)
        assert_equal(norm(array([], dtype=self.dt)), 0.0)
        assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)

    def test_vector_return_type(self):
        a = np.array([1, 0, 1])

        exact_types = "Bbhil"  # np.typecodes["AllInteger"]
        inexact_types = "efdFD"  # np.typecodes["AllFloat"]

        all_types = exact_types + inexact_types
        for each_type in all_types:
            at = a.astype(each_type)
            if each_type == np.dtype("float16"):
                # FIXME: move looping to parametrize, add decorators=[xfail]
                # pytest.xfail("float16**float64 => float64 (?)")
                raise SkipTest("float16**float64 => float64 (?)")

            an = norm(at, -np.inf)
            self.check_dtype(at, an)
            assert_almost_equal(an, 0.0)

            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "divide by zero encountered")
                an = norm(at, -1)
                self.check_dtype(at, an)
                assert_almost_equal(an, 0.0)

            an = norm(at, 0)
            self.check_dtype(at, an)
            assert_almost_equal(an, 2)

            an = norm(at, 1)
            self.check_dtype(at, an)
            assert_almost_equal(an, 2.0)

            an = norm(at, 2)
            self.check_dtype(at, an)
            assert_almost_equal(an, an.dtype.type(2.0) ** an.dtype.type(1.0 / 2.0))

            an = norm(at, 4)
            self.check_dtype(at, an)
            assert_almost_equal(an, an.dtype.type(2.0) ** an.dtype.type(1.0 / 4.0))

            an = norm(at, np.inf)
            self.check_dtype(at, an)
            assert_almost_equal(an, 1.0)

    def test_vector(self):
        a = [1, 2, 3, 4]
        b = [-1, -2, -3, -4]
        c = [-1, 2, -3, 4]

        def _test(v):
            np.testing.assert_almost_equal(norm(v), 30**0.5, decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, inf), 4.0, decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, -inf), 1.0, decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, 1), 10.0, decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, 2), 30**0.5, decimal=self.dec)
            np.testing.assert_almost_equal(
                norm(v, -2), ((205.0 / 144) ** -0.5), decimal=self.dec
            )
            np.testing.assert_almost_equal(norm(v, 0), 4, decimal=self.dec)

        for v in (
            a,
            b,
            c,
        ):
            _test(v)

        for v in (
            array(a, dtype=self.dt),
            array(b, dtype=self.dt),
            array(c, dtype=self.dt),
        ):
            _test(v)

    def test_axis(self):
        # Vector norms.
        # Compare the use of `axis` with computing the norm of each row
        # or column separately.
        A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
        for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]:
            expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
            assert_almost_equal(norm(A, ord=order, axis=0), expected0)

            expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
            assert_almost_equal(norm(A, ord=order, axis=1), expected1)

        # Matrix norms.
        B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
        nd = B.ndim
        for order in [None, -2, 2, -1, 1, np.inf, -np.inf, "fro"]:
            for axis in itertools.combinations(range(-nd, nd), 2):
                row_axis, col_axis = axis
                if row_axis < 0:
                    row_axis += nd
                if col_axis < 0:
                    col_axis += nd
                if row_axis == col_axis:
                    assert_raises(
                        (RuntimeError, ValueError), norm, B, ord=order, axis=axis
                    )
                else:
                    n = norm(B, ord=order, axis=axis)

                    # The logic using k_index only works for nd = 3.
                    # This has to be changed if nd is increased.
                    k_index = nd - (row_axis + col_axis)
                    if row_axis < col_axis:
                        expected = [
                            norm(B[:].take(k, axis=k_index), ord=order)
                            for k in range(B.shape[k_index])
                        ]
                    else:
                        expected = [
                            norm(B[:].take(k, axis=k_index).T, ord=order)
                            for k in range(B.shape[k_index])
                        ]
                    assert_almost_equal(n, expected)

    def test_keepdims(self):
        A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

        allclose_err = "order {0}, axis = {1}"
        shape_err = "Shape mismatch found {0}, expected {1}, order={2}, axis={3}"

        # check the order=None, axis=None case
        expected = norm(A, ord=None, axis=None)
        found = norm(A, ord=None, axis=None, keepdims=True)
        assert_allclose(
            np.squeeze(found), expected, err_msg=allclose_err.format(None, None)
        )
        expected_shape = (1, 1, 1)
        assert_(
            found.shape == expected_shape,
            shape_err.format(found.shape, expected_shape, None, None),
        )

        # Vector norms.
        for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]:
            for k in range(A.ndim):
                expected = norm(A, ord=order, axis=k)
                found = norm(A, ord=order, axis=k, keepdims=True)
                assert_allclose(
                    np.squeeze(found), expected, err_msg=allclose_err.format(order, k)
                )
                expected_shape = list(A.shape)
                expected_shape[k] = 1
                expected_shape = tuple(expected_shape)
                assert_(
                    found.shape == expected_shape,
                    shape_err.format(found.shape, expected_shape, order, k),
                )

        # Matrix norms.
        for order in [None, -2, 2, -1, 1, np.inf, -np.inf, "fro", "nuc"]:
            for k in itertools.permutations(range(A.ndim), 2):
                expected = norm(A, ord=order, axis=k)
                found = norm(A, ord=order, axis=k, keepdims=True)
                assert_allclose(
                    np.squeeze(found), expected, err_msg=allclose_err.format(order, k)
                )
                expected_shape = list(A.shape)
                expected_shape[k[0]] = 1
                expected_shape[k[1]] = 1
                expected_shape = tuple(expected_shape)
                assert_(
                    found.shape == expected_shape,
                    shape_err.format(found.shape, expected_shape, order, k),
                )


class _TestNorm2D(_TestNormBase):
    # Define the part for 2d arrays separately, so we can subclass this
    # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.

    def test_matrix_empty(self):
        assert_equal(norm(np.array([[]], dtype=self.dt)), 0.0)

    def test_matrix_return_type(self):
        a = np.array([[1, 0, 1], [0, 1, 1]])

        exact_types = "Bbhil"  # np.typecodes["AllInteger"]
        # float32, complex64, float64, complex128 types are the only types
        # allowed by `linalg`, which performs the matrix operations used
        # within `norm`.
        inexact_types = "fdFD"

        all_types = exact_types + inexact_types
        for each_type in all_types:
            at = a.astype(each_type)

            an = norm(at, -np.inf)
            self.check_dtype(at, an)
            assert_almost_equal(an, 2.0)

            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "divide by zero encountered")
                an = norm(at, -1)
                self.check_dtype(at, an)
                assert_almost_equal(an, 1.0)

            an = norm(at, 1)
            self.check_dtype(at, an)
            assert_almost_equal(an, 2.0)

            an = norm(at, 2)
            self.check_dtype(at, an)
            assert_almost_equal(an, 3.0 ** (1.0 / 2.0))

            an = norm(at, -2)
            self.check_dtype(at, an)
            assert_almost_equal(an, 1.0)

            an = norm(at, np.inf)
            self.check_dtype(at, an)
            assert_almost_equal(an, 2.0)

            an = norm(at, "fro")
            self.check_dtype(at, an)
            assert_almost_equal(an, 2.0)

            an = norm(at, "nuc")
            self.check_dtype(at, an)
            # Lower bar needed to support low precision floats.
            # They end up being off by 1 in the 7th place.
            np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)

    def test_matrix_2x2(self):
        A = np.array([[1, 3], [5, 7]], dtype=self.dt)
        assert_almost_equal(norm(A), 84**0.5)
        assert_almost_equal(norm(A, "fro"), 84**0.5)
        assert_almost_equal(norm(A, "nuc"), 10.0)
        assert_almost_equal(norm(A, inf), 12.0)
        assert_almost_equal(norm(A, -inf), 4.0)
        assert_almost_equal(norm(A, 1), 10.0)
        assert_almost_equal(norm(A, -1), 6.0)
        assert_almost_equal(norm(A, 2), 9.1231056256176615)
        assert_almost_equal(norm(A, -2), 0.87689437438234041)

        assert_raises((RuntimeError, ValueError), norm, A, "nofro")
        assert_raises((RuntimeError, ValueError), norm, A, -3)
        assert_raises((RuntimeError, ValueError), norm, A, 0)

    def test_matrix_3x3(self):
        # This test has been added because the 2x2 example
        # happened to have equal nuclear norm and induced 1-norm.
        # The 1/10 scaling factor accommodates the absolute tolerance
        # used in assert_almost_equal.
        A = (1 / 10) * np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
        assert_almost_equal(norm(A), (1 / 10) * 89**0.5)
        assert_almost_equal(norm(A, "fro"), (1 / 10) * 89**0.5)
        assert_almost_equal(norm(A, "nuc"), 1.3366836911774836)
        assert_almost_equal(norm(A, inf), 1.1)
        assert_almost_equal(norm(A, -inf), 0.6)
        assert_almost_equal(norm(A, 1), 1.0)
        assert_almost_equal(norm(A, -1), 0.4)
        assert_almost_equal(norm(A, 2), 0.88722940323461277)
        assert_almost_equal(norm(A, -2), 0.19456584790481812)

    def test_bad_args(self):
        # Check that bad arguments raise the appropriate exceptions.

        A = np.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
        B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

        # Using `axis=<integer>` or passing in a 1-D array implies vector
        # norms are being computed, so also using `ord='fro'`
        # or `ord='nuc'` or any other string raises a ValueError.
        assert_raises((RuntimeError, ValueError), norm, A, "fro", 0)
        assert_raises((RuntimeError, ValueError), norm, A, "nuc", 0)
        assert_raises((RuntimeError, ValueError), norm, [3, 4], "fro", None)
        assert_raises((RuntimeError, ValueError), norm, [3, 4], "nuc", None)
        assert_raises((RuntimeError, ValueError), norm, [3, 4], "test", None)

        # Similarly, norm should raise an exception when ord is any finite
        # number other than 1, 2, -1 or -2 when computing matrix norms.
        for order in [0, 3]:
            assert_raises((RuntimeError, ValueError), norm, A, order, None)
            assert_raises((RuntimeError, ValueError), norm, A, order, (0, 1))
            assert_raises((RuntimeError, ValueError), norm, B, order, (1, 2))

        # Invalid axis
        assert_raises((IndexError, np.AxisError), norm, B, None, 3)
        assert_raises((IndexError, np.AxisError), norm, B, None, (2, 3))
        assert_raises((RuntimeError, ValueError), norm, B, None, (0, 1, 2))


class _TestNorm(_TestNorm2D, _TestNormGeneral):
    pass


class TestNorm_NonSystematic(TestCase):
    def test_intmin(self):
        # Non-regression test: p-norm of signed integer would previously do
        # float cast and abs in the wrong order.
        x = np.array([-(2**31)], dtype=np.int32)
        old_assert_almost_equal(norm(x, ord=3), 2**31, decimal=5)


# Separate definitions so we can use them for matrix tests.
class _TestNormDoubleBase(_TestNormBase, TestCase): dt = np.double dec = 12 class _TestNormSingleBase(_TestNormBase, TestCase): dt = np.float32 dec = 6 class _TestNormInt64Base(_TestNormBase, TestCase): dt = np.int64 dec = 12 class TestNormDouble(_TestNorm, _TestNormDoubleBase, TestCase): pass class TestNormSingle(_TestNorm, _TestNormSingleBase, TestCase): pass class TestNormInt64(_TestNorm, _TestNormInt64Base): pass class TestMatrixRank(TestCase): def test_matrix_rank(self): # Full rank matrix assert_equal(4, matrix_rank(np.eye(4))) # rank deficient matrix I = np.eye(4) I[-1, -1] = 0.0 assert_equal(matrix_rank(I), 3) # All zeros - zero rank assert_equal(matrix_rank(np.zeros((4, 4))), 0) # 1 dimension - rank 1 unless all 0 assert_equal(matrix_rank([1, 0, 0, 0]), 1) assert_equal(matrix_rank(np.zeros((4,))), 0) # accepts array-like assert_equal(matrix_rank([1]), 1) # greater than 2 dimensions treated as stacked matrices ms = np.array([I, np.eye(4), np.zeros((4, 4))]) assert_equal(matrix_rank(ms), np.array([3, 4, 0])) # works on scalar assert_equal(matrix_rank(1), 1) def test_symmetric_rank(self): assert_equal(4, matrix_rank(np.eye(4), hermitian=True)) assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True)) assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True)) # rank deficient matrix I = np.eye(4) I[-1, -1] = 0.0 assert_equal(3, matrix_rank(I, hermitian=True)) # manually supplied tolerance I[-1, -1] = 1e-8 assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8)) assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8)) def test_reduced_rank(self): # Test matrices with reduced rank # rng = np.random.RandomState(20120714) np.random.seed(20120714) for i in range(100): # Make a rank deficient matrix X = np.random.normal(size=(40, 10)) X[:, 0] = X[:, 1] + X[:, 2] # Assert that matrix_rank detected deficiency assert_equal(matrix_rank(X), 9) X[:, 3] = X[:, 4] + X[:, 5] assert_equal(matrix_rank(X), 8) @instantiate_parametrized_tests class TestQR(TestCase): def check_qr(self, a): # This test expects the argument `a` to be an ndarray or # a subclass of an ndarray of inexact type. 
a_type = type(a) a_dtype = a.dtype m, n = a.shape k = min(m, n) # mode == 'complete' q, r = linalg.qr(a, mode="complete") assert_(q.dtype == a_dtype) assert_(r.dtype == a_dtype) assert_(isinstance(q, a_type)) assert_(isinstance(r, a_type)) assert_(q.shape == (m, m)) assert_(r.shape == (m, n)) assert_almost_equal(dot(q, r), a, single_decimal=5) assert_almost_equal(dot(q.T.conj(), q), np.eye(m)) assert_almost_equal(np.triu(r), r) # mode == 'reduced' q1, r1 = linalg.qr(a, mode="reduced") assert_(q1.dtype == a_dtype) assert_(r1.dtype == a_dtype) assert_(isinstance(q1, a_type)) assert_(isinstance(r1, a_type)) assert_(q1.shape == (m, k)) assert_(r1.shape == (k, n)) assert_almost_equal(dot(q1, r1), a, single_decimal=5) assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) assert_almost_equal(np.triu(r1), r1) # mode == 'r' r2 = linalg.qr(a, mode="r") assert_(r2.dtype == a_dtype) assert_(isinstance(r2, a_type)) assert_almost_equal(r2, r1) @xpassIfTorchDynamo # (reason="torch does not allow qr(..., mode='raw'") @parametrize("m, n", [(3, 0), (0, 3), (0, 0)]) def test_qr_empty(self, m, n): k = min(m, n) a = np.empty((m, n)) self.check_qr(a) h, tau = np.linalg.qr(a, mode="raw") assert_equal(h.dtype, np.double) assert_equal(tau.dtype, np.double) assert_equal(h.shape, (n, m)) assert_equal(tau.shape, (k,)) @xpassIfTorchDynamo # (reason="torch does not allow qr(..., mode='raw'") def test_mode_raw(self): # The factorization is not unique and varies between libraries, # so it is not possible to check against known values. Functional # testing is a possibility, but awaits the exposure of more # of the functions in lapack_lite. Consequently, this test is # very limited in scope. Note that the results are in FORTRAN # order, hence the h arrays are transposed. a = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) # Test double h, tau = linalg.qr(a, mode="raw") assert_(h.dtype == np.double) assert_(tau.dtype == np.double) assert_(h.shape == (2, 3)) assert_(tau.shape == (2,)) h, tau = linalg.qr(a.T, mode="raw") assert_(h.dtype == np.double) assert_(tau.dtype == np.double) assert_(h.shape == (3, 2)) assert_(tau.shape == (2,)) def test_mode_all_but_economic(self): a = np.array([[1, 2], [3, 4]]) b = np.array([[1, 2], [3, 4], [5, 6]]) for dt in "fd": m1 = a.astype(dt) m2 = b.astype(dt) self.check_qr(m1) self.check_qr(m2) self.check_qr(m2.T) for dt in "fd": m1 = 1 + 1j * a.astype(dt) m2 = 1 + 1j * b.astype(dt) self.check_qr(m1) self.check_qr(m2) self.check_qr(m2.T) def check_qr_stacked(self, a): # This test expects the argument `a` to be an ndarray or # a subclass of an ndarray of inexact type. 
a_type = type(a) a_dtype = a.dtype m, n = a.shape[-2:] k = min(m, n) # mode == 'complete' q, r = linalg.qr(a, mode="complete") assert_(q.dtype == a_dtype) assert_(r.dtype == a_dtype) assert_(isinstance(q, a_type)) assert_(isinstance(r, a_type)) assert_(q.shape[-2:] == (m, m)) assert_(r.shape[-2:] == (m, n)) assert_almost_equal(matmul(q, r), a, single_decimal=5) I_mat = np.identity(q.shape[-1]) stack_I_mat = np.broadcast_to(I_mat, q.shape[:-2] + (q.shape[-1],) * 2) assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat) assert_almost_equal(np.triu(r[..., :, :]), r) # mode == 'reduced' q1, r1 = linalg.qr(a, mode="reduced") assert_(q1.dtype == a_dtype) assert_(r1.dtype == a_dtype) assert_(isinstance(q1, a_type)) assert_(isinstance(r1, a_type)) assert_(q1.shape[-2:] == (m, k)) assert_(r1.shape[-2:] == (k, n)) assert_almost_equal(matmul(q1, r1), a, single_decimal=5) I_mat = np.identity(q1.shape[-1]) stack_I_mat = np.broadcast_to(I_mat, q1.shape[:-2] + (q1.shape[-1],) * 2) assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1), stack_I_mat) assert_almost_equal(np.triu(r1[..., :, :]), r1) # mode == 'r' r2 = linalg.qr(a, mode="r") assert_(r2.dtype == a_dtype) assert_(isinstance(r2, a_type)) assert_almost_equal(r2, r1) @skipif(numpy.__version__ < "1.22", reason="NP_VER: fails on CI with numpy 1.21.2") @parametrize("size", [(3, 4), (4, 3), (4, 4), (3, 0), (0, 3)]) @parametrize("outer_size", [(2, 2), (2,), (2, 3, 4)]) @parametrize("dt", [np.single, np.double, np.csingle, np.cdouble]) def test_stacked_inputs(self, outer_size, size, dt): A = np.random.normal(size=outer_size + size).astype(dt) B = np.random.normal(size=outer_size + size).astype(dt) self.check_qr_stacked(A) self.check_qr_stacked(A + 1.0j * B) @instantiate_parametrized_tests class TestCholesky(TestCase): # TODO: are there no other tests for cholesky? 
@parametrize("shape", [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]) @parametrize("dtype", (np.float32, np.float64, np.complex64, np.complex128)) def test_basic_property(self, shape, dtype): # Check A = L L^H np.random.seed(1) a = np.random.randn(*shape) if np.issubdtype(dtype, np.complexfloating): a = a + 1j * np.random.randn(*shape) t = list(range(len(shape))) t[-2:] = -1, -2 a = np.matmul(a.transpose(t).conj(), a) a = np.asarray(a, dtype=dtype) c = np.linalg.cholesky(a) b = np.matmul(c, c.transpose(t).conj()) atol = 500 * a.shape[0] * np.finfo(dtype).eps assert_allclose(b, a, atol=atol, err_msg=f"{shape} {dtype}\n{a}\n{c}") def test_0_size(self): # class ArraySubclass(np.ndarray): # pass a = np.zeros((0, 1, 1), dtype=np.int_) # .view(ArraySubclass) res = linalg.cholesky(a) assert_equal(a.shape, res.shape) assert_(res.dtype.type is np.float64) # for documentation purpose: assert_(isinstance(res, np.ndarray)) a = np.zeros((1, 0, 0), dtype=np.complex64) # .view(ArraySubclass) res = linalg.cholesky(a) assert_equal(a.shape, res.shape) assert_(res.dtype.type is np.complex64) assert_(isinstance(res, np.ndarray)) class TestMisc(TestCase): @xpassIfTorchDynamo # (reason="endianness") def test_byteorder_check(self): # Byte order check should pass for native order if sys.byteorder == "little": native = "<" else: native = ">" for dtt in (np.float32, np.float64): arr = np.eye(4, dtype=dtt) n_arr = arr.newbyteorder(native) sw_arr = arr.newbyteorder("S").byteswap() assert_equal(arr.dtype.byteorder, "=") for routine in (linalg.inv, linalg.det, linalg.pinv): # Normal call res = routine(arr) # Native but not '=' assert_array_equal(res, routine(n_arr)) # Swapped assert_array_equal(res, routine(sw_arr)) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_generalized_raise_multiloop(self): # It should raise an error even if the error doesn't occur in the # last iteration of the ufunc inner loop invertible = np.array([[1, 2], [3, 4]]) non_invertible = np.array([[1, 1], [1, 1]]) x = np.zeros([4, 4, 2, 2])[1::2] x[...] = invertible x[0, 0] = non_invertible assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) def test_xerbla_override(self): # Check that our xerbla has been successfully linked in. If it is not, # the default xerbla routine is called, which prints a message to stdout # and may, or may not, abort the process depending on the LAPACK package. XERBLA_OK = 255 try: pid = os.fork() except (OSError, AttributeError): # fork failed, or not running on POSIX raise SkipTest("Not POSIX or fork failed.") # noqa: B904 if pid == 0: # child; close i/o file handles os.close(1) os.close(0) # Avoid producing core files. import resource resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) # These calls may abort. try: np.linalg.lapack_lite.xerbla() except ValueError: pass except Exception: os._exit(os.EX_CONFIG) try: a = np.array([[1.0]]) np.linalg.lapack_lite.dorgqr( 1, 1, 1, a, 0, a, a, 0, 0 ) # <- invalid value except ValueError as e: if "DORGQR parameter number 5" in str(e): # success, reuse error code to mark success as # FORTRAN STOP returns as success. os._exit(XERBLA_OK) # Did not abort, but our xerbla was not linked in. os._exit(os.EX_CONFIG) else: # parent pid, status = os.wait() if os.WEXITSTATUS(status) != XERBLA_OK: raise SkipTest("Numpy xerbla not linked in.") @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") @slow def test_sdot_bug_8577(self): # Regression test that loading certain other libraries does not # result to wrong results in float32 linear algebra. 
# # There's a bug gh-8577 on OSX that can trigger this, and perhaps # there are also other situations in which it occurs. # # Do the check in a separate process. bad_libs = ["PyQt5.QtWidgets", "IPython"] template = textwrap.dedent( """ import sys {before} try: import {bad_lib} except ImportError: sys.exit(0) {after} x = np.ones(2, dtype=np.float32) sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1) """ ) for bad_lib in bad_libs: code = template.format( before="import numpy as np", after="", bad_lib=bad_lib ) subprocess.check_call([sys.executable, "-c", code]) # Swapped import order code = template.format( after="import numpy as np", before="", bad_lib=bad_lib ) subprocess.check_call([sys.executable, "-c", code]) class TestMultiDot(TestCase): def test_basic_function_with_three_arguments(self): # multi_dot with three arguments uses a fast hand-coded algorithm to # determine the optimal order. Therefore test it separately. A = np.random.random((6, 2)) B = np.random.random((2, 6)) C = np.random.random((6, 2)) assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C)) assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C))) def test_basic_function_with_two_arguments(self): # separate code path with two arguments A = np.random.random((6, 2)) B = np.random.random((2, 6)) assert_almost_equal(multi_dot([A, B]), A.dot(B)) assert_almost_equal(multi_dot([A, B]), np.dot(A, B)) def test_basic_function_with_dynamic_programming_optimization(self): # multi_dot with four or more arguments uses the dynamic programming # optimization and therefore deserves a separate test A = np.random.random((6, 2)) B = np.random.random((2, 6)) C = np.random.random((6, 2)) D = np.random.random((2, 1)) assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D)) def test_vector_as_first_argument(self): # The first argument can be 1-D A1d = np.random.random(2) # 1-D B = np.random.random((2, 6)) C = np.random.random((6, 2)) D = np.random.random((2, 2)) # the result should be 1-D assert_equal(multi_dot([A1d, B, C, D]).shape, (2,)) def test_vector_as_last_argument(self): # The last argument can be 1-D A = np.random.random((6, 2)) B = np.random.random((2, 6)) C = np.random.random((6, 2)) D1d = np.random.random(2) # 1-D # the result should be 1-D assert_equal(multi_dot([A, B, C, D1d]).shape, (6,)) def test_vector_as_first_and_last_argument(self): # The first and last arguments can be 1-D A1d = np.random.random(2) # 1-D B = np.random.random((2, 6)) C = np.random.random((6, 2)) D1d = np.random.random(2) # 1-D # the result should be a scalar assert_equal(multi_dot([A1d, B, C, D1d]).shape, ()) def test_three_arguments_and_out(self): # multi_dot with three arguments uses a fast hand-coded algorithm to # determine the optimal order. Therefore test it separately. 
A = np.random.random((6, 2)) B = np.random.random((2, 6)) C = np.random.random((6, 2)) out = np.zeros((6, 2)) ret = multi_dot([A, B, C], out=out) assert out is ret assert_almost_equal(out, A.dot(B).dot(C)) assert_almost_equal(out, np.dot(A, np.dot(B, C))) def test_two_arguments_and_out(self): # separate code path with two arguments A = np.random.random((6, 2)) B = np.random.random((2, 6)) out = np.zeros((6, 6)) ret = multi_dot([A, B], out=out) assert out is ret assert_almost_equal(out, A.dot(B)) assert_almost_equal(out, np.dot(A, B)) def test_dynamic_programming_optimization_and_out(self): # multi_dot with four or more arguments uses the dynamic programming # optimization and therefore deserves a separate test A = np.random.random((6, 2)) B = np.random.random((2, 6)) C = np.random.random((6, 2)) D = np.random.random((2, 1)) out = np.zeros((6, 1)) ret = multi_dot([A, B, C, D], out=out) assert out is ret assert_almost_equal(out, A.dot(B).dot(C).dot(D)) def test_dynamic_programming_logic(self): # Test for the dynamic programming part # This test is directly taken from Cormen page 376. arrays = [ np.random.random((30, 35)), np.random.random((35, 15)), np.random.random((15, 5)), np.random.random((5, 10)), np.random.random((10, 20)), np.random.random((20, 25)), ] m_expected = np.array( [ [0.0, 15750.0, 7875.0, 9375.0, 11875.0, 15125.0], [0.0, 0.0, 2625.0, 4375.0, 7125.0, 10500.0], [0.0, 0.0, 0.0, 750.0, 2500.0, 5375.0], [0.0, 0.0, 0.0, 0.0, 1000.0, 3500.0], [0.0, 0.0, 0.0, 0.0, 0.0, 5000.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], ] ) s_expected = np.array( [ [0, 1, 1, 3, 3, 3], [0, 0, 2, 3, 3, 3], [0, 0, 0, 3, 3, 3], [0, 0, 0, 0, 4, 5], [0, 0, 0, 0, 0, 5], [0, 0, 0, 0, 0, 0], ], dtype=int, ) s_expected -= 1 # Cormen uses 1-based index, Python does not. s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True) # Only the upper triangular part (without the diagonal) is interesting. 
assert_almost_equal(np.triu(s[:-1, 1:]), np.triu(s_expected[:-1, 1:])) assert_almost_equal(np.triu(m), np.triu(m_expected)) def test_too_few_input_arrays(self): assert_raises((RuntimeError, ValueError), multi_dot, []) assert_raises((RuntimeError, ValueError), multi_dot, [np.random.random((3, 3))]) @instantiate_parametrized_tests class TestTensorinv(TestCase): @parametrize( "arr, ind", [ (np.ones((4, 6, 8, 2)), 2), (np.ones((3, 3, 2)), 1), ], ) def test_non_square_handling(self, arr, ind): with assert_raises((LinAlgError, RuntimeError)): linalg.tensorinv(arr, ind=ind) @parametrize( "shape, ind", [ # examples from docstring ((4, 6, 8, 3), 2), ((24, 8, 3), 1), ], ) def test_tensorinv_shape(self, shape, ind): a = np.eye(24).reshape(shape) ainv = linalg.tensorinv(a=a, ind=ind) expected = a.shape[ind:] + a.shape[:ind] actual = ainv.shape assert_equal(actual, expected) @parametrize( "ind", [ 0, -2, ], ) def test_tensorinv_ind_limit(self, ind): a = np.eye(24).reshape(4, 6, 8, 3) with assert_raises((ValueError, RuntimeError)): linalg.tensorinv(a=a, ind=ind) def test_tensorinv_result(self): # mimic a docstring example a = np.eye(24).reshape(24, 8, 3) ainv = linalg.tensorinv(a, ind=1) b = np.ones(24) assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) @instantiate_parametrized_tests class TestTensorsolve(TestCase): @parametrize( "a, axes", [ (np.ones((4, 6, 8, 2)), None), (np.ones((3, 3, 2)), (0, 2)), ], ) def test_non_square_handling(self, a, axes): with assert_raises((LinAlgError, RuntimeError)): b = np.ones(a.shape[:2]) linalg.tensorsolve(a, b, axes=axes) @skipif(numpy.__version__ < "1.22", reason="NP_VER: fails on CI with numpy 1.21.2") @parametrize( "shape", [(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)], ) def test_tensorsolve_result(self, shape): a = np.random.randn(*shape) b = np.ones(a.shape[:2]) x = np.linalg.tensorsolve(a, b) assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b) class TestMisc2(TestCase): @xpassIfTorchDynamo # (reason="TODO") def test_unsupported_commontype(self): # linalg gracefully handles unsupported type arr = np.array([[1, -2], [2, 5]], dtype="float16") # with assert_raises_regex(TypeError, "unsupported in linalg"): with assert_raises(TypeError): linalg.cholesky(arr) # @slow # @pytest.mark.xfail(not HAS_LAPACK64, run=False, # reason="Numpy not compiled with 64-bit BLAS/LAPACK") # @requires_memory(free_bytes=16e9) @skip(reason="Bad memory reports lead to OOM in ci testing") def test_blas64_dot(self): n = 2**32 a = np.zeros([1, n], dtype=np.float32) b = np.ones([1, 1], dtype=np.float32) a[0, -1] = 1 c = np.dot(b, a) assert_equal(c[0, -1], 1) @skip(reason="lapack-lite specific") @xfail # ( # not HAS_LAPACK64, reason="Numpy not compiled with 64-bit BLAS/LAPACK" # ) def test_blas64_geqrf_lwork_smoketest(self): # Smoke test LAPACK geqrf lwork call with 64-bit integers dtype = np.float64 lapack_routine = np.linalg.lapack_lite.dgeqrf m = 2**32 + 1 n = 2**32 + 1 lda = m # Dummy arrays, not referenced by the lapack routine, so don't # need to be of the right size a = np.zeros([1, 1], dtype=dtype) work = np.zeros([1], dtype=dtype) tau = np.zeros([1], dtype=dtype) # Size query results = lapack_routine(m, n, a, lda, tau, work, -1, 0) assert_equal(results["info"], 0) assert_equal(results["m"], m) assert_equal(results["n"], n) # Should result in an integer of a reasonable size lwork = int(work.item()) assert_(2**32 < lwork < 2**42) if __name__ == "__main__": run_tests()
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
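Editor's note: the eigh checks in the record above assert the defining identity a @ v == v * w through the suite's dot_generalized helper. A minimal standalone sketch of the same invariant in plain numpy (the matrix below is illustrative, not taken from the test cases):

import numpy as np

# A small symmetric (hence Hermitian) matrix, so eigh applies.
a = np.array([[2.0, 1.0], [1.0, 3.0]])
w, v = np.linalg.eigh(a)
# Columns of v are eigenvectors: a @ v[:, i] == w[i] * v[:, i],
# which broadcasting over the last axis expresses as a @ v == v * w.
assert np.allclose(a @ v, v * w)
# eigh also guarantees an orthonormal eigenbasis.
assert np.allclose(v.T @ v, np.eye(2))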
do
def do(self, a, b, tags): x = linalg.solve(a, b) assert_almost_equal(b, dot_generalized(a, x)) assert_(consistent_subclass(x, b))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
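Editor's note: the do case above leans on the suite's dot_generalized and consistent_subclass helpers to cover stacked inputs. A self-contained sketch of the same round trip, assuming only numpy (the matrices are illustrative):

import numpy as np

a = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
x = np.linalg.solve(a, b)
# Substituting the solution back must reproduce the right-hand side.
assert np.allclose(a @ x, b)

# solve broadcasts over leading axes, which is what dot_generalized
# accounts for in the generalized (stacked) test cases.
a_stack = np.stack([a, 2.0 * a])
b_stack = np.stack([b, b])
x_stack = np.linalg.solve(a_stack, b_stack)
assert np.allclose(np.einsum("...ij,...j->...i", a_stack, x_stack), b_stack)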
test_mode_all_but_economic
def test_mode_all_but_economic(self): a = np.array([[1, 2], [3, 4]]) b = np.array([[1, 2], [3, 4], [5, 6]]) for dt in "fd": m1 = a.astype(dt) m2 = b.astype(dt) self.check_qr(m1) self.check_qr(m2) self.check_qr(m2.T) for dt in "fd": m1 = 1 + 1j * a.astype(dt) m2 = 1 + 1j * b.astype(dt) self.check_qr(m1) self.check_qr(m2) self.check_qr(m2.T)
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] @instantiate_parametrized_tests class TestQR(TestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
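Editor's note: check_qr, exercised by the record above for real and complex dtypes, pins down the shape contract of the three QR modes. A short plain-numpy sketch with an illustrative 3x2 input:

import numpy as np

a = np.arange(6.0).reshape(3, 2)           # m=3, n=2, k=min(m, n)=2
q, r = np.linalg.qr(a, mode="complete")    # q: (m, m), r: (m, n)
assert q.shape == (3, 3) and r.shape == (3, 2)
q1, r1 = np.linalg.qr(a, mode="reduced")   # the default; q: (m, k), r: (k, n)
assert q1.shape == (3, 2) and r1.shape == (2, 2)
r2 = np.linalg.qr(a, mode="r")             # r only, matching the reduced r
assert np.allclose(r2, r1)
# Both factorizations reproduce the input.
assert np.allclose(q @ r, a) and np.allclose(q1 @ r1, a)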
test_stacked_inputs
def test_stacked_inputs(self, outer_size, size, dt): A = np.random.normal(size=outer_size + size).astype(dt) B = np.random.normal(size=outer_size + size).astype(dt) self.check_qr_stacked(A) self.check_qr_stacked(A + 1.0j * B)
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] @instantiate_parametrized_tests class TestQR(TestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
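Editor's note: check_qr_stacked verifies that qr operates on the last two axes of a batch. A minimal sketch of the batched shape and orthogonality contract (assuming numpy >= 1.22, the same gate as the skipif above):

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(2, 3, 4, 3))   # a (2, 3)-shaped stack of 4x3 matrices
q, r = np.linalg.qr(a)              # reduced mode, applied matrix-wise
assert q.shape == (2, 3, 4, 3) and r.shape == (2, 3, 3, 3)
# Q^H Q is the identity for every matrix in the stack.
eye = np.broadcast_to(np.eye(3), (2, 3, 3, 3))
assert np.allclose(np.swapaxes(q, -1, -2) @ q, eye)
assert np.allclose(q @ r, a)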
test_basic_property
def test_basic_property(self, shape, dtype): # Check A = L L^H np.random.seed(1) a = np.random.randn(*shape) if np.issubdtype(dtype, np.complexfloating): a = a + 1j * np.random.randn(*shape) t = list(range(len(shape))) t[-2:] = -1, -2 a = np.matmul(a.transpose(t).conj(), a) a = np.asarray(a, dtype=dtype) c = np.linalg.cholesky(a) b = np.matmul(c, c.transpose(t).conj()) atol = 500 * a.shape[0] * np.finfo(dtype).eps assert_allclose(b, a, atol=atol, err_msg=f"{shape} {dtype}\n{a}\n{c}")
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] @instantiate_parametrized_tests class TestCholesky(TestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
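Editor's note: test_basic_property manufactures a positive-definite input as a^H a before factoring. The same A = L L^H round trip in a minimal plain-numpy sketch (the shift by 4*I simply guarantees definiteness for the random draw):

import numpy as np

rng = np.random.default_rng(1)
x = rng.normal(size=(4, 4))
a = x.T @ x + 4 * np.eye(4)        # symmetric positive definite by construction
L = np.linalg.cholesky(a)
assert np.allclose(L, np.tril(L))  # the factor is lower triangular
assert np.allclose(L @ L.T, a)     # and reproduces a, up to rounding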
test_generalized_raise_multiloop
def test_generalized_raise_multiloop(self): # It should raise an error even if the error doesn't occur in the # last iteration of the ufunc inner loop invertible = np.array([[1, 2], [3, 4]]) non_invertible = np.array([[1, 1], [1, 1]]) x = np.zeros([4, 4, 2, 2])[1::2] x[...] = invertible x[0, 0] = non_invertible assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] class TestMisc(TestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
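Editor's note: the point of the test above is that one bad slice poisons the whole batched call, even when it is not the last slice the inner loop visits. A compact sketch of the same behavior:

import numpy as np

# The middle slice is singular; the outer slices are fine.
stack = np.stack([np.eye(2), np.ones((2, 2)), np.eye(2)])
try:
    np.linalg.inv(stack)
except np.linalg.LinAlgError:
    pass  # expected: one singular slice fails the entire stacked inversion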
test_sdot_bug_8577
def test_sdot_bug_8577(self): # Regression test that loading certain other libraries does not # lead to wrong results in float32 linear algebra. # # There's a bug gh-8577 on OSX that can trigger this, and perhaps # there are also other situations in which it occurs. # # Do the check in a separate process. bad_libs = ["PyQt5.QtWidgets", "IPython"] template = textwrap.dedent( """ import sys {before} try: import {bad_lib} except ImportError: sys.exit(0) {after} x = np.ones(2, dtype=np.float32) sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1) """ ) for bad_lib in bad_libs: code = template.format( before="import numpy as np", after="", bad_lib=bad_lib ) subprocess.check_call([sys.executable, "-c", code]) # Swapped import order code = template.format( after="import numpy as np", before="", bad_lib=bad_lib ) subprocess.check_call([sys.executable, "-c", code])
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] class TestMisc(TestCase): import resource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_basic_function_with_three_arguments
def test_basic_function_with_three_arguments(self): # multi_dot with three arguments uses a fast hand-coded algorithm to # determine the optimal order. Therefore test it separately. A = np.random.random((6, 2)) B = np.random.random((2, 6)) C = np.random.random((6, 2)) assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C)) assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMultiDot(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
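Editor's note: for exactly three operands, multi_dot just compares the flop counts of the two possible parenthesizations, (AB)C versus A(BC), and picks the cheaper one. A worked sketch of that cost comparison for the shapes used above (multiplying a p x q matrix by a q x r matrix costs p*q*r scalar multiplications):

import numpy as np
from numpy.linalg import multi_dot

A = np.ones((6, 2))
B = np.ones((2, 6))
C = np.ones((6, 2))
# (A B) C: 6*2*6 + 6*6*2 = 72 + 72 = 144 multiplications
# A (B C): 2*6*2 + 6*2*2 = 24 + 24 =  48 multiplications  -> cheaper
assert np.allclose(multi_dot([A, B, C]), A @ (B @ C))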
test_tensorinv_ind_limit
def test_tensorinv_ind_limit(self, ind): a = np.eye(24).reshape(4, 6, 8, 3) with assert_raises((ValueError, RuntimeError)): linalg.tensorinv(a=a, ind=ind)
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource @instantiate_parametrized_tests class TestTensorinv(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
test_tensorinv_result
def test_tensorinv_result(self): # mimic a docstring example a = np.eye(24).reshape(24, 8, 3) ainv = linalg.tensorinv(a, ind=1) b = np.ones(24) assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource @instantiate_parametrized_tests class TestTensorinv(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
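Editor's note: tensorinv(a, ind) treats a as a matrix after flattening the first ind axes into rows and the remaining axes into columns; the result carries those two axis groups swapped. A sketch of the shape rule and of the equivalence the test asserts:

import numpy as np

a = np.eye(24).reshape(24, 8, 3)
ainv = np.linalg.tensorinv(a, ind=1)
assert ainv.shape == (8, 3, 24)      # a.shape[ind:] + a.shape[:ind]
b = np.arange(24.0)
# Contracting the inverse with b matches the direct tensor solve.
assert np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))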
test_tensorsolve_result
def test_tensorsolve_result(self, shape): a = np.random.randn(*shape) b = np.ones(a.shape[:2]) x = np.linalg.tensorsolve(a, b) assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b)
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource @instantiate_parametrized_tests class TestTensorsolve(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/linalg/test_linalg.py
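Editor's note: tensorsolve is an ordinary square solve after reshaping: the axes of a that match b's shape become the rows, and the remaining axes become the unknowns. A sketch of that equivalence for the (2, 3, 6) case parametrized above:

import numpy as np

rng = np.random.default_rng(2)
a = rng.normal(size=(2, 3, 6))  # prod of leading dims (2*3) == prod of trailing (6)
b = np.ones((2, 3))
x = np.linalg.tensorsolve(a, b)
assert x.shape == (6,)
# An explicit reshape-and-solve gives the same answer.
x_ref = np.linalg.solve(a.reshape(6, 6), b.ravel())
assert np.allclose(x, x_ref)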
test_blas64_geqrf_lwork_smoketest
def test_blas64_geqrf_lwork_smoketest(self): # Smoke test LAPACK geqrf lwork call with 64-bit integers dtype = np.float64 lapack_routine = np.linalg.lapack_lite.dgeqrf m = 2**32 + 1 n = 2**32 + 1 lda = m # Dummy arrays, not referenced by the lapack routine, so don't # need to be of the right size a = np.zeros([1, 1], dtype=dtype) work = np.zeros([1], dtype=dtype) tau = np.zeros([1], dtype=dtype) # Size query results = lapack_routine(m, n, a, lda, tau, work, -1, 0) assert_equal(results["info"], 0) assert_equal(results["m"], m) assert_equal(results["n"], n) # Should result in an integer of a reasonable size lwork = int(work.item()) assert_(2**32 < lwork < 2**42)
import functools import itertools import os import subprocess import sys import textwrap import traceback from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import numpy import pytest from numpy.linalg.linalg import _multi_dot_matrix_chain_order from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, slowTest as slow, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.testing import ( # assert_raises_regex, HAS_LAPACK64, IS_WASM assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) import torch._numpy as np from torch._numpy import ( array, asarray, atleast_2d, cdouble, csingle, dot, double, identity, inf, linalg, matmul, single, swapaxes, ) from torch._numpy.linalg import ( LinAlgError, matrix_power, matrix_rank, multi_dot, norm, ) from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) skip = functools.partial(skipif, True) IS_WASM = False HAS_LAPACK64 = False old_assert_almost_equal = assert_almost_equal all_tags = { "square", "nonsquare", "hermitian", # mutually exclusive "generalized", "size-0", "strided", # optional additions } CASES = [] import resource class TestMisc2(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_andshape_array
def test_andshape_array(self, func): t = w.asarray([[1, 2, 3], [4, 5, 6]]) shape_dict = {self.shape_arg_name.get(func, "shape"): self.shape} ta = func(t, **shape_dict) assert isinstance(ta, w.ndarray) assert ta.shape == self.shape
import functools import inspect from unittest import expectedFailure as xfail, skipIf as skip import numpy as _np from pytest import raises as assert_raises import torch import torch._numpy as w import torch._numpy._ufuncs as _ufuncs import torch._numpy._util as _util from torch._numpy.testing import assert_allclose, assert_equal from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) one_arg_funcs = [ w.asarray, w.empty_like, w.ones_like, w.zeros_like, functools.partial(w.full_like, fill_value=42), w.corrcoef, w.squeeze, w.argmax, # w.bincount, # XXX: input dtypes w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close, w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy, w.array, w.round, w.around, w.flip, w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack, w.flatnonzero, ] ufunc_names = _ufuncs._unary one_arg_axis_funcs = [ w.argmax, w.argmin, w.prod, w.sum, w.all, w.any, w.mean, w.argsort, w.std, w.var, w.flip, ] arr_shape_funcs = [ w.reshape, w.empty_like, w.ones_like, functools.partial(w.full_like, fill_value=42), w.broadcast_to, ] @instantiate_parametrized_tests class TestOneArrAndShape(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
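Editor's note: these wrappers pair an input array with a target shape; the shape_arg_name lookup exists because reshape spells the parameter newshape while the *_like creators and broadcast_to call it shape. A small sketch against torch._numpy with illustrative shapes (assuming torch is importable and the wrappers mirror numpy's signatures):

import torch._numpy as w

t = w.asarray([[1, 2, 3], [4, 5, 6]])
assert w.reshape(t, (3, 2)).shape == (3, 2)      # positional newshape
assert w.ones_like(t, shape=(6,)).shape == (6,)  # keyword shape
assert w.broadcast_to(w.asarray([1, 2, 3]), (2, 3)).shape == (2, 3)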
test_toscalar_array
def test_toscalar_array(self, func, np_func): t = w.asarray([[1, 2, 3], [4, 5, 6]]) ta = func(t) tn = np_func(t) assert not isinstance(ta, w.ndarray) assert ta == tn
import functools import inspect from unittest import expectedFailure as xfail, skipIf as skip import numpy as _np from pytest import raises as assert_raises import torch import torch._numpy as w import torch._numpy._ufuncs as _ufuncs import torch._numpy._util as _util from torch._numpy.testing import assert_allclose, assert_equal from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) one_arg_funcs = [ w.asarray, w.empty_like, w.ones_like, w.zeros_like, functools.partial(w.full_like, fill_value=42), w.corrcoef, w.squeeze, w.argmax, # w.bincount, # XXX: input dtypes w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close, w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy, w.array, w.round, w.around, w.flip, w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack, w.flatnonzero, ] ufunc_names = _ufuncs._unary one_arg_axis_funcs = [ w.argmax, w.argmin, w.prod, w.sum, w.all, w.any, w.mean, w.argsort, w.std, w.var, w.flip, ] arr_shape_funcs = [ w.reshape, w.empty_like, w.ones_like, functools.partial(w.full_like, fill_value=42), w.broadcast_to, ] one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)] @instantiate_parametrized_tests class TestOneArrToScalar(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_shape
def test_shape(self, func): a = func(self.shape) assert isinstance(a, w.ndarray) assert a.shape == self.shape
import functools import inspect from unittest import expectedFailure as xfail, skipIf as skip import numpy as _np from pytest import raises as assert_raises import torch import torch._numpy as w import torch._numpy._ufuncs as _ufuncs import torch._numpy._util as _util from torch._numpy.testing import assert_allclose, assert_equal from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) one_arg_funcs = [ w.asarray, w.empty_like, w.ones_like, w.zeros_like, functools.partial(w.full_like, fill_value=42), w.corrcoef, w.squeeze, w.argmax, # w.bincount, # XXX: input dtypes w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close, w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy, w.array, w.round, w.around, w.flip, w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack, w.flatnonzero, ] ufunc_names = _ufuncs._unary one_arg_axis_funcs = [ w.argmax, w.argmin, w.prod, w.sum, w.all, w.any, w.mean, w.argsort, w.std, w.var, w.flip, ] arr_shape_funcs = [ w.reshape, w.empty_like, w.ones_like, functools.partial(w.full_like, fill_value=42), w.broadcast_to, ] one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)] shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)] @instantiate_parametrized_tests class TestShapeLikeToArray(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
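Editor's note: the shape_funcs group builds arrays from a bare shape. A minimal sketch mirroring the test's assertion, with illustrative shapes:

import torch._numpy as w

for func in (w.zeros, w.ones, w.empty):
    a = func((2, 3))
    assert isinstance(a, w.ndarray) and a.shape == (2, 3)
# full takes the fill value as a keyword, as the partial in the suite does.
assert w.full((2, 3), fill_value=42)[0, 0] == 42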
test_several
def test_several(self, func): arys = ( torch.Tensor([[1, 2, 3], [4, 5, 6]]), w.asarray([[1, 2, 3], [4, 5, 6]]), [[1, 2, 3], [4, 5, 6]], ) result = func(*arys) assert isinstance(result, (tuple, list)) assert len(result) == len(arys) assert all(isinstance(_, w.ndarray) for _ in result)
import functools import inspect from unittest import expectedFailure as xfail, skipIf as skip import numpy as _np from pytest import raises as assert_raises import torch import torch._numpy as w import torch._numpy._ufuncs as _ufuncs import torch._numpy._util as _util from torch._numpy.testing import assert_allclose, assert_equal from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) one_arg_funcs = [ w.asarray, w.empty_like, w.ones_like, w.zeros_like, functools.partial(w.full_like, fill_value=42), w.corrcoef, w.squeeze, w.argmax, # w.bincount, # XXX: input dtypes w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close, w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy, w.array, w.round, w.around, w.flip, w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack, w.flatnonzero, ] ufunc_names = _ufuncs._unary one_arg_axis_funcs = [ w.argmax, w.argmin, w.prod, w.sum, w.all, w.any, w.mean, w.argsort, w.std, w.var, w.flip, ] arr_shape_funcs = [ w.reshape, w.empty_like, w.ones_like, functools.partial(w.full_like, fill_value=42), w.broadcast_to, ] one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)] shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)] seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays] @instantiate_parametrized_tests class TestSequenceOfArrays(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
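Editor's note: test_several feeds a mixed bag, a torch.Tensor, a wrapped ndarray, and a plain nested list, and expects a matching sequence of wrapped ndarrays back. A compact sketch with atleast_2d (any of the seq_funcs behaves the same way here):

import torch
import torch._numpy as w

inputs = (torch.Tensor([1.0, 2.0]), w.asarray([3, 4]), [5, 6])
out = w.atleast_2d(*inputs)
assert len(out) == len(inputs)
assert all(isinstance(x, w.ndarray) and x.ndim == 2 for x in out)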
test_asarray_array
def test_asarray_array(self, func):
    a = w.asarray([[1.0, 2, 3], [4, 5, 6]])
    la = func(a)
    assert isinstance(la, w.ndarray)
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary


@instantiate_parametrized_tests
class TestOneArr(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_argstoarray_simple
def test_argstoarray_simple(self, func, args):
    a = func(*args)
    assert isinstance(a, w.ndarray)
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


@instantiate_parametrized_tests
class TestPythonArgsToArray(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_unknown_args
def test_unknown_args(self):
    # Check that unknown args to decorated functions fail
    a = w.arange(7) % 2 == 0

    # unknown positional args
    with assert_raises(TypeError):
        w.nonzero(a, "kaboom")

    # unknown kwarg
    with assert_raises(TypeError):
        w.nonzero(a, oops="ouch")
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestNormalizations(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_too_few_args_positional
def test_too_few_args_positional(self):
    with assert_raises(TypeError):
        w.nonzero()
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestNormalizations(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_unknown_args_with_defaults
def test_unknown_args_with_defaults(self):
    # check a function with 5 arguments and 4 defaults: this should work
    w.eye(3)

    # five arguments, four defaults: this should fail
    with assert_raises(TypeError):
        w.eye()
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestNormalizations(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_copyto_basic
def test_copyto_basic(self):
    dst = w.empty(4)
    src = w.arange(4)
    w.copyto(dst, src)
    assert (dst == src).all()
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestCopyTo(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_copytobcast
def test_copytobcast(self):
    dst = w.empty((4, 2))
    src = w.arange(4)

    # cannot broadcast => error out
    with assert_raises(RuntimeError):
        w.copyto(dst, src)

    # broadcast src against dst
    dst = w.empty((2, 4))
    w.copyto(dst, src)
    assert (dst == src).all()
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestCopyTo(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_andaxis_array
def test_andaxis_array(self, func, axis):
    t = w.asarray([[1.0, 2, 3], [4, 5, 6]])
    ta = func(t, axis=axis)
    assert isinstance(ta, w.ndarray)
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]


@instantiate_parametrized_tests
class TestOneArrAndAxis(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_andtuple_array
def test_andtuple_array(self, func, axes):
    t = w.asarray([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]])
    ta = func(t, axes=axes)
    assert isinstance(ta, w.ndarray)
    if axes is None:
        newshape = (3, 2, 1)
    else:
        newshape = tuple(t.shape[axes[i]] for i in range(t.ndim))
    assert ta.shape == newshape
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]


@instantiate_parametrized_tests
class TestOneArrAndAxesTuple(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_copyto_typecast
def test_copyto_typecast(self):
    dst = w.empty(4, dtype=int)
    src = w.arange(4, dtype=float)

    with assert_raises(TypeError):
        w.copyto(dst, src, casting="no")

    # force the type cast
    w.copyto(dst, src, casting="unsafe")
    assert (dst == src).all()
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestCopyTo(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_divmod_out
def test_divmod_out(self):
    x1 = w.arange(8, 15)
    x2 = w.arange(4, 11)

    out = (w.empty_like(x1), w.empty_like(x1))

    quot, rem = w.divmod(x1, x2, out=out)

    assert_equal(quot, x1 // x2)
    assert_equal(rem, x1 % x2)

    out1, out2 = out
    assert quot is out[0]
    assert rem is out[1]
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestDivmod(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_divmod_pos_only
def test_divmod_pos_only(self):
    x1 = [4, 5, 6]
    x2 = [2, 1, 2]

    out1, out2 = w.empty_like(x1), w.empty_like(x1)

    quot, rem = w.divmod(x1, x2, out1, out2)

    assert quot is out1
    assert rem is out2
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestDivmod(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_divmod_no_out
def test_divmod_no_out(self):
    # check that the out= machinery handles no out at all
    x1 = w.array([4, 5, 6])
    x2 = w.array([2, 1, 2])
    quot, rem = w.divmod(x1, x2)
    assert_equal(quot, x1 // x2)
    assert_equal(rem, x1 % x2)
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestDivmod(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_divmod_out_both_pos_and_kw
def test_divmod_out_both_pos_and_kw(self):
    o = w.empty(1)
    with assert_raises(TypeError):
        w.divmod(1, 2, o, o, out=(o, o))
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestDivmod(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_nimpl_basic
def test_nimpl_basic(self):
    # smoke test that the "NotImplemented" annotation is picked up
    with assert_raises(NotImplementedError):
        w.empty(3, like="ooops")
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestSmokeNotImpl(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_set_default_float
def test_set_default_float(self, dt):
    try:
        w.set_default_dtype(fp_dtype=dt)
        x = w.empty(3)
        z = x + 1j * x
        assert x.dtype.torch_dtype == torch.float32
        assert z.dtype.torch_dtype == torch.complex64
    finally:
        # restore the default
        w.set_default_dtype(fp_dtype="numpy")
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


@instantiate_parametrized_tests
class TestDefaultDtype(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_exported_objects
def test_exported_objects(self):
    exported_fns = (
        x
        for x in dir(w)
        if inspect.isfunction(getattr(w, x))
        and not x.startswith("_")
        and x != "set_default_dtype"
    )
    diff = set(exported_fns).difference(set(dir(_np)))
    assert len(diff) == 0, str(diff)
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


@skip(_np.__version__ <= "1.23", reason="from_dlpack is new in NumPy 1.23")
class TestExport(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_equal
def test_equal(self):
    assert_allclose(
        np.equal(0.5, 0.6), equal(0.5, 0.6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_float_power
def test_float_power(self):
    assert_allclose(
        np.float_power(0.5, 0.6),
        float_power(0.5, 0.6),
        atol=1e-7,
        check_dtype=False,
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_floor_divide
def test_floor_divide(self):
    assert_allclose(
        np.floor_divide(0.5, 0.6),
        floor_divide(0.5, 0.6),
        atol=1e-7,
        check_dtype=False,
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_fmax
def test_fmax(self):
    assert_allclose(np.fmax(0.5, 0.6), fmax(0.5, 0.6), atol=1e-7, check_dtype=False)
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_fmin
def test_fmin(self):
    assert_allclose(np.fmin(0.5, 0.6), fmin(0.5, 0.6), atol=1e-7, check_dtype=False)
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_fmod
def test_fmod(self):
    assert_allclose(np.fmod(0.5, 0.6), fmod(0.5, 0.6), atol=1e-7, check_dtype=False)
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_gcd
def test_gcd(self):
    assert_allclose(np.gcd(5, 6), gcd(5, 6), atol=1e-7, check_dtype=False)
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_greater
def test_greater(self):
    assert_allclose(
        np.greater(0.5, 0.6), greater(0.5, 0.6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_greater_equal
def test_greater_equal(self):
    assert_allclose(
        np.greater_equal(0.5, 0.6),
        greater_equal(0.5, 0.6),
        atol=1e-7,
        check_dtype=False,
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_heaviside
def test_heaviside(self):
    assert_allclose(
        np.heaviside(0.5, 0.6), heaviside(0.5, 0.6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_arrays_in_lists
def test_arrays_in_lists(self):
    lst = [[1, 2], [3, w.array(4)]]
    assert_equal(w.asarray(lst), [[1, 2], [3, 4]])
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestCtorNested(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_basic.py
test_f16_on_cuda
def test_f16_on_cuda(self):
    # make sure operations with float16 tensors give the same results on CUDA and on CPU
    t = torch.arange(5, dtype=torch.float16)
    assert_allclose(w.vdot(t.cuda(), t.cuda()), w.vdot(t, t))
    assert_allclose(w.inner(t.cuda(), t.cuda()), w.inner(t, t))
    assert_allclose(w.matmul(t.cuda(), t.cuda()), w.matmul(t, t))
    assert_allclose(w.einsum("i,i", t.cuda(), t.cuda()), w.einsum("i,i", t, t))

    assert_allclose(w.mean(t.cuda()), w.mean(t))

    assert_allclose(w.cov(t.cuda(), t.cuda()), w.cov(t, t).tensor.cuda())
    assert_allclose(w.corrcoef(t.cuda()), w.corrcoef(t).tensor.cuda())
import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)

one_arg_funcs = [
    w.asarray, w.empty_like, w.ones_like, w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef, w.squeeze, w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod, w.sum, w.real, w.imag, w.angle, w.real_if_close,
    w.isreal, w.iscomplex, w.isneginf, w.isposinf, w.i0, w.copy,
    w.array, w.round, w.around, w.flip,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary

one_arg_axis_funcs = [
    w.argmax, w.argmin, w.prod, w.sum, w.all, w.any,
    w.mean, w.argsort, w.std, w.var, w.flip,
]

arr_shape_funcs = [
    w.reshape, w.empty_like, w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]

one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]

shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]

seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]

seq_to_single_funcs = [
    w.concatenate, w.stack,
    w.vstack, w.hstack, w.dstack, w.column_stack, w.row_stack,
]

single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)

funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]


class TestMisc(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_add
def test_add(self):
    assert_allclose(np.add(0.5, 0.6), add(0.5, 0.6), atol=1e-7, check_dtype=False)
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_arctan2
def test_arctan2(self):
    assert_allclose(
        np.arctan2(0.5, 0.6), arctan2(0.5, 0.6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_bitwise_and
def test_bitwise_and(self):
    assert_allclose(
        np.bitwise_and(5, 6), bitwise_and(5, 6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_bitwise_or
def test_bitwise_or(self):
    assert_allclose(
        np.bitwise_or(5, 6), bitwise_or(5, 6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_bitwise_xor
def test_bitwise_xor(self):
    assert_allclose(
        np.bitwise_xor(5, 6), bitwise_xor(5, 6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_copysign
def test_copysign(self):
    assert_allclose(
        np.copysign(0.5, 0.6), copysign(0.5, 0.6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_divide
def test_divide(self):
    assert_allclose(
        np.divide(0.5, 0.6), divide(0.5, 0.6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_binary_ufuncs.py
test_subtract
def test_subtract(self):
    assert_allclose(
        np.subtract(0.5, 0.6), subtract(0.5, 0.6), atol=1e-7, check_dtype=False
    )
import numpy as np

from torch._numpy._ufuncs import *  # noqa: F403
from torch._numpy.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase


class TestBinaryUfuncBasic(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_dtype.py
test_convert_np_dtypes
def test_convert_np_dtypes(self, name, np_dtype):
    tnp_dtype = tnp.dtype(np_dtype)
    if name == "bool_":
        assert tnp_dtype == tnp.bool_
    elif tnp_dtype.name == "bool_":
        assert name.startswith("bool")
    else:
        assert tnp_dtype.name == name
from unittest import expectedFailure as xfail

import numpy

import torch._numpy as tnp
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    subtest,
    TestCase,
)

dtype_names = [
    "bool_",
    *[f"int{w}" for w in [8, 16, 32, 64]],
    *[f"uint{w}" for w in [8, 16, 32, 64]],
    *[f"float{w}" for w in [16, 32, 64]],
    *[f"complex{w}" for w in [64, 128]],
]

np_dtype_params = []
np_dtype_params = [
    subtest(("bool", "bool"), name="bool"),
    subtest(
        ("bool", numpy.dtype("bool")),
        name="numpy.dtype('bool')",
        decorators=[xfail],
        # reason="XXX: np.dtype() objects not supported"),
    ),
]


@instantiate_parametrized_tests
class TestConvertDType(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_function_base.py
test_basic
def test_basic(self):
    result = np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    assert_equal(result, np.arange(1, 10, dtype=int))

    # When `axis` is specified, `values` must have the correct shape.
    result = np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    assert_equal(result, np.arange(1, 10, dtype=int).reshape((3, 3)))

    with pytest.raises((RuntimeError, ValueError)):
        np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
import pytest

from torch.testing._internal.common_utils import (
    run_tests,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import numpy as np
from numpy.testing import assert_equal

import torch._numpy as np
from torch._numpy.testing import assert_equal


class TestAppend(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_ndarray_methods.py
test_indexing_simple
def test_indexing_simple(self):
    a = np.array([[1, 2, 3], [4, 5, 6]])

    assert isinstance(a[0, 0], np.ndarray)
    assert isinstance(a[0, :], np.ndarray)
    assert a[0, :].tensor._base is a.tensor
import itertools
from unittest import expectedFailure as xfail, skipIf as skipif

import numpy
import pytest
from pytest import raises as assert_raises

from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
    xpassIfTorchDynamo,
)

import numpy as np
from numpy.testing import assert_equal

import torch._numpy as np
from torch._numpy.testing import assert_equal


class TestIndexing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_ndarray_methods.py
test_setitem
def test_setitem(self):
    a = np.array([[1, 2, 3], [4, 5, 6]])
    a[0, 0] = 8
    assert isinstance(a, np.ndarray)
    assert_equal(a, [[8, 2, 3], [4, 5, 6]])
import itertools
from unittest import expectedFailure as xfail, skipIf as skipif

import numpy
import pytest
from pytest import raises as assert_raises

from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
    xpassIfTorchDynamo,
)

import numpy as np
from numpy.testing import assert_equal

import torch._numpy as np
from torch._numpy.testing import assert_equal


class TestIndexing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_ndarray_methods.py
test_reshape_method
def test_reshape_method(self):
    arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
    arr_shape = arr.shape

    tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]

    # reshape(*shape_tuple)
    assert np.all(arr.reshape(2, 6) == tgt)
    assert arr.reshape(2, 6).tensor._base is arr.tensor  # reshape keeps the base
    assert arr.shape == arr_shape  # arr is intact

    # XXX: move out to dedicated test(s)
    assert arr.reshape(2, 6).tensor._base is arr.tensor

    # reshape(shape_tuple)
    assert np.all(arr.reshape((2, 6)) == tgt)
    assert arr.reshape((2, 6)).tensor._base is arr.tensor
    assert arr.shape == arr_shape

    tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
    assert np.all(arr.reshape(3, 4) == tgt)
    assert arr.reshape(3, 4).tensor._base is arr.tensor
    assert arr.shape == arr_shape

    assert np.all(arr.reshape((3, 4)) == tgt)
    assert arr.reshape((3, 4)).tensor._base is arr.tensor
    assert arr.shape == arr_shape

    # XXX: order='C' / 'F'
    # tgt = [[1, 4, 7, 10],
    #        [2, 5, 8, 11],
    #        [3, 6, 9, 12]]
    # assert np.all(arr.T.reshape((3, 4), order='C') == tgt)
    #
    # tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
    # assert_equal(arr.reshape((3, 4), order='F'), tgt)
import itertools
from unittest import expectedFailure as xfail, skipIf as skipif

import numpy
import pytest
from pytest import raises as assert_raises

from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
    xpassIfTorchDynamo,
)

import numpy as np
from numpy.testing import assert_equal

import torch._numpy as np
from torch._numpy.testing import assert_equal


class TestReshape(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added