Dataset schema (10 string columns):

| column | type info |
|---|---|
| library | 1 distinct value |
| test_file | 785 distinct values |
| test_function | lengths 1-295 |
| before | lengths 0-448k |
| after | lengths 0-487k |
| context_before | 947 distinct values |
| context_after | lengths 0-16.3k |
| commit_before | 1 distinct value |
| commit_after | 1 distinct value |
| change_type | 3 distinct values |

Each record below lists its fields in that order.
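As a minimal, hypothetical sketch (the file name `rows.jsonl` is illustrative and not part of the dataset card), rows exported in JSON Lines form can be loaded with the Hugging Face `datasets` library:

```python
# Minimal loading sketch; "rows.jsonl" is a hypothetical export of the records below.
from datasets import load_dataset

ds = load_dataset("json", data_files="rows.jsonl", split="train")
print(ds.column_names)           # ['library', 'test_file', 'test_function', 'before', ...]
print(ds[0]["test_function"])    # 'fn'
print(ds[0]["change_type"])      # 'modified'
```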
library: torch
test_file: test/test_sparse.py
test_function: fn
before:
def fn(x):
    return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
after:
def fn(x):
    return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
context_before:
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
context_after:
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: modified
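The record above captures the migration of this snippet from `gradcheck(..., check_sparse_nnz=True)` to masked gradcheck semantics with `to_dense(masked_grad=...)`. Below is a minimal, self-contained sketch of the same pattern, assuming a recent PyTorch (2.1+) where `torch.autograd.gradcheck` accepts `masked=` and `Tensor.to_dense` accepts `masked_grad=`; it is an illustration, not the test-suite code itself:

```python
import torch
from torch.autograd import gradcheck

# Small double-precision sparse COO input (gradcheck needs float64).
i = torch.tensor([[0, 1, 2], [0, 1, 2]])
v = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float64)
x = torch.sparse_coo_tensor(i, v, (3, 3)).coalesce()
x.requires_grad_(True)

def fn(t):
    # masked_grad=True pairs with masked=True below; together they replace
    # the removed check_sparse_nnz=True flag.
    return t.to_dense(masked_grad=True)

assert gradcheck(fn, (x,), masked=True, check_batched_grad=False)
```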
library: torch
test_file: test/test_sparse.py
test_function: test_shape
before:
def test_shape(sparse_dims, nnz, with_size):
    if isinstance(with_size, Number):
        with_size = [with_size] * sparse_dims
    x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
    self.assertEqual(i, x._indices())
    self.assertEqual(v, x._values())
    self.assertEqual(x.ndimension(), len(with_size))
    self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
    self.assertEqual(list(x.size()), with_size)
    # Test .indices() and .values()
    if not coalesced:
        with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
            x.indices()
        with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
            x.values()
    else:
        self.assertEqual(x.indices(), x._indices())
        self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
after:
def test_shape(sparse_dims, nnz, with_size):
    if isinstance(with_size, Number):
        with_size = [with_size] * sparse_dims
    x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
    self.assertEqual(i, x._indices())
    self.assertEqual(v, x._values())
    self.assertEqual(x.ndimension(), len(with_size))
    self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
    self.assertEqual(list(x.size()), with_size)
    # Test .indices() and .values()
    if not coalesced:
        with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
            x.indices()
        with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
            x.values()
    else:
        self.assertEqual(x.indices(), x._indices())
        self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
context_before:
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
context_after:
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: modified
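Besides shape checks, the record above verifies that `coalesce()` merges duplicate indices (16 entries collapse to 9 unique ones) and sums their values. A standalone illustration of that behaviour using only public sparse APIs:

```python
import torch

# Same index pattern as the test: index 9 appears twice, 0 three times, etc.
i = torch.tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]])
v = torch.tensor([[float(idx) ** 2, float(idx)] for idx in range(i.size(1))])
x = torch.sparse_coo_tensor(i, v, (10, 2))

y = x.coalesce()
print(x._nnz(), y._nnz())    # 16 9 -- duplicates merged
print(y.indices()[0])        # sorted unique indices: 0, 1, 2, 3, 4, 6, 7, 8, 9
print(y.values()[0])         # values at index 0: sum of the entries at positions 1, 2, 3
```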
library: torch
test_file: test/test_sparse.py
test_function: test_tensor
before:
def test_tensor(x, res):
    x.to_dense()  # Tests triple to_dense for memory corruption
    x.to_dense()
    x.to_dense()
    dense_x = x.to_dense()
    safe_dense_x = self.safeToDense(x)
    dense_x = dense_x.to(res.dtype)
    safe_dense_x = safe_dense_x.to(res.dtype)
    self.assertEqual(res, dense_x)
    self.assertEqual(res, safe_dense_x)
    # Only run autograd test for float64
    if x.dtype != torch.float64:
        return
    def fn(x):
        return x.to_dense()
    x.requires_grad_(True)
    gradcheck(fn, (x,), check_sparse_nnz=True)
for value_type in [torch.double, torch.cdouble]:
    i = self.index_tensor([
        [0, 1, 2, 2],
        [0, 0, 0, 3],
        [0, 0, 1, 4],
    ], device=device)
    # we don't have to_dense for half types on CPU because it is implemented
    # with a slower add_ operation
    v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
    res = torch.tensor([
        [[2, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]],
        [[1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]],
        [[0, 3, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 4]],
    ], dtype=dtype, device=device)
    test_tensor(x, res)
    test_tensor(res, res)
    i = self.index_tensor([
        [0, 1, 2, 2],
        [0, 0, 0, 3],
        [0, 0, 1, 4],
    ], device=device)
    v = torch.empty(4, 0, dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
    res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
    test_tensor(x, res)
after:
def test_tensor(x, res):
    x.to_dense()  # Tests triple to_dense for memory corruption
    x.to_dense()
    x.to_dense()
    dense_x = x.to_dense()
    safe_dense_x = self.safeToDense(x)
    dense_x = dense_x.to(res.dtype)
    safe_dense_x = safe_dense_x.to(res.dtype)
    self.assertEqual(res, dense_x)
    self.assertEqual(res, safe_dense_x)
    # Only run autograd test for float64
    if x.dtype != torch.float64:
        return
    def fn(x):
        return x.to_dense(masked_grad=gradcheck.masked)
    x.requires_grad_(True)
    gradcheck(fn, (x,))
for value_type in [torch.double, torch.cdouble]:
    i = self.index_tensor([
        [0, 1, 2, 2],
        [0, 0, 0, 3],
        [0, 0, 1, 4],
    ], device=device)
    # we don't have to_dense for half types on CPU because it is implemented
    # with a slower add_ operation
    v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
    res = torch.tensor([
        [[2, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]],
        [[1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]],
        [[0, 3, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 4]],
    ], dtype=dtype, device=device)
    test_tensor(x, res)
    test_tensor(res, res)
    i = self.index_tensor([
        [0, 1, 2, 2],
        [0, 0, 0, 3],
        [0, 0, 1, 4],
    ], device=device)
    v = torch.empty(4, 0, dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
    res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
    test_tensor(x, res)
context_before:
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
context_after:
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: modified
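Each record stores the full `before` and `after` snippets rather than a patch, so a unified diff for `change_type == "modified"` rows can be reconstructed on the fly. A small standard-library sketch (field names follow the schema above; the example row is abbreviated):

```python
import difflib

def row_to_diff(row: dict) -> str:
    # Unified diff between the 'before' and 'after' snippets of one record.
    return "\n".join(difflib.unified_diff(
        row["before"].splitlines(),
        row["after"].splitlines(),
        fromfile=f"{row['test_file']}@{row['commit_before'][:8]}",
        tofile=f"{row['test_file']}@{row['commit_after'][:8]}",
        lineterm="",
    ))

# Abbreviated example built from the first record above.
row = {
    "test_file": "test/test_sparse.py",
    "commit_before": "c263bd43e8e8502d4726643bc6fd046f0130ac0e",
    "commit_after": "32f585d9346e316e554c8d9bf7548af9f62141fc",
    "before": "gradcheck(fn, (x,), check_sparse_nnz=True)",
    "after": "gradcheck(fn, (x,))",
}
print(row_to_diff(row))
```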
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
run_tests
|
def run_tests(S, td=None):
D = S.coalesce().to_dense().detach().requires_grad_(True)
if td is None:
S_sum = torch.sparse.sum(S)
D_sum = D.sum()
self.assertEqual(S_sum.item(), D_sum.item())
def fn(S):
res = torch.sparse.sum(S)
if res.is_sparse:
res = res.to_dense()
return res
gradcheck(fn, (S,), check_sparse_nnz=True)
else:
S_sum = torch.sparse.sum(S, td)
D_sum = D.sum(td)
self.assertEqual(S_sum.to_dense() if S_sum.is_sparse else S_sum, D_sum)
def fn(S):
res = torch.sparse.sum(S, td)
if res.is_sparse:
res = res.to_dense()
return res
gradcheck(fn, (S,), check_sparse_nnz=True)
nnz = 10
sparse_dims = 2
with_size = [5, 5, 1, 4] # use a dense dim = 1 to test for squeeze
test_dims = []
for i in range(1, 5):
test_dims += itertools.combinations(range(len(with_size)), i)
# https://github.com/pytorch/pytorch/issues/16501
x = torch.tensor([[1., 0., 0., 1.],
[0., 1., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 2.]], dtype=dtype, device=device).to_sparse()
self.assertEqual(torch.sparse.sum(x, dim=0), torch.sparse.sum(x, dim=-2))
self.assertEqual(torch.sum(x.to_dense(), dim=0), torch.sparse.sum(x, dim=0).to_dense())
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
# dim out of range
self.assertRaises(IndexError, lambda: torch.sparse.sum(S, 5))
# dim 0 appears multiple times in the list of dims
self.assertRaises(RuntimeError, lambda: torch.sparse.sum(S, [0, 0]))
# sum an empty tensor
empty_S = torch.sparse_coo_tensor(size=with_size, dtype=dtype, device=device)
self.assertEqual(torch.sparse.sum(empty_S, [0]).to_dense(), torch.sum(empty_S.to_dense(), [0]))
self.assertEqual(torch.sparse.sum(empty_S), torch.tensor(0, dtype=dtype, device=device))
empty_S.requires_grad_(True)
empty_S_sum = torch.sparse.sum(empty_S)
empty_S_sum.backward()
self.assertEqual(empty_S.grad.to_dense(), empty_S.clone().detach().to_dense())
# test values().sum()
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
run_tests(S.requires_grad_(True))
for test_dim in test_dims:
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
run_tests(S.requires_grad_(True), test_dim)
|
def run_tests(S, td=None):
D = S.coalesce().to_dense().detach().requires_grad_(True)
if td is None:
S_sum = torch.sparse.sum(S)
D_sum = D.sum()
self.assertEqual(S_sum.item(), D_sum.item())
def fn(S):
return torch.sparse.sum(S)
gradcheck(fn, (S,), masked=True)
else:
S_sum = torch.sparse.sum(S, td)
D_sum = D.sum(td)
self.assertEqual(S_sum.to_dense() if S_sum.is_sparse else S_sum, D_sum)
def fn(S):
res = torch.sparse.sum(S, td)
return res.to_dense(masked_grad=True)
gradcheck(fn, (S,), masked=True)
nnz = 10
sparse_dims = 2
with_size = [5, 5, 1, 4] # use a dense dim = 1 to test for squeeze
test_dims = []
for i in range(1, 5):
test_dims += itertools.combinations(range(len(with_size)), i)
# https://github.com/pytorch/pytorch/issues/16501
x = torch.tensor([[1., 0., 0., 1.],
[0., 1., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 2.]], dtype=dtype, device=device).to_sparse()
self.assertEqual(torch.sparse.sum(x, dim=0), torch.sparse.sum(x, dim=-2))
self.assertEqual(torch.sum(x.to_dense(), dim=0), torch.sparse.sum(x, dim=0).to_dense())
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
# dim out of range
self.assertRaises(IndexError, lambda: torch.sparse.sum(S, 5))
# dim 0 appears multiple times in the list of dims
self.assertRaises(RuntimeError, lambda: torch.sparse.sum(S, [0, 0]))
# sum an empty tensor
empty_S = torch.sparse_coo_tensor(size=with_size, dtype=dtype, device=device)
self.assertEqual(torch.sparse.sum(empty_S, [0]).to_dense(), torch.sum(empty_S.to_dense(), [0]))
self.assertEqual(torch.sparse.sum(empty_S), torch.tensor(0, dtype=dtype, device=device))
empty_S.requires_grad_(True)
empty_S_sum = torch.sparse.sum(empty_S)
empty_S_sum.backward()
self.assertEqual(empty_S.grad.to_dense(), empty_S.clone().detach().to_dense())
# test values().sum()
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
run_tests(S.requires_grad_(True))
for test_dim in test_dims:
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
run_tests(S.requires_grad_(True), test_dim)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
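(Editor's note, not part of the dataset row above: a minimal, self-contained Python sketch of the behaviour the test_shape cell exercises. It uses only public torch APIs plus the private _nnz() accessor already used in the row; the duplicate row indices — 9 distinct values among 16 entries — coalesce down to nnz == 9 because coalesce() sums values that share an index.)

import torch

# 16 entries, but only 9 distinct row indices: coalesce() merges duplicates.
i = torch.tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]])
v = torch.tensor([[float(k ** 2), float(k)] for k in range(i.size(1))])
x = torch.sparse_coo_tensor(i, v, (10, 2))
assert x.coalesce()._nnz() == 9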
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
fn
|
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
|
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
fn
|
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
|
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
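(Editor's note, not part of the dataset rows above: a hedged, standalone sketch of the migration the two `fn` rows show — the removed check_sparse_nnz=True flag replaced by gradcheck's masked argument together with to_dense(masked_grad=...). It assumes a PyTorch build where both keywords exist, as in the "after" snippet; the test-suite's gradcheck.masked attribute is replaced here by a literal True.)

import torch
from torch.autograd import gradcheck

def fn(t):
    # masked_grad mirrors gradcheck's masked semantics for to_dense backward.
    return t.to_dense(masked_grad=True)

i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([1.0, 2.0, 3.0], dtype=torch.double)
x = torch.sparse_coo_tensor(i, v, (2, 3)).coalesce().requires_grad_(True)
gradcheck(fn, (x,), masked=True, check_batched_grad=False)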
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
_test_sparse_mask_fixed
|
def _test_sparse_mask_fixed():
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
], device=device)
v = torch.tensor([1, 2, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([5, 4]), dtype=dtype, device=device).coalesce()
dense = torch.tensor([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
[17, 18, 19, 20],
], dtype=dtype, device=device)
exp_v = torch.tensor([7, 14, 3, 20], dtype=dtype, device=device)
res_dense_lhs = dense.sparse_mask(x)
sparse = dense.to_sparse()
res_sparse_lhs = sparse.sparse_mask(x)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4]), dtype=dtype, device=device)
self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
# check no side effects for the coalesce flag.
self.assertTrue(sparse.is_coalesced())
self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
], device=device)
v = torch.empty([4, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([5, 4, 0])).coalesce()
dense = torch.empty([5, 4, 0], dtype=dtype, device=device)
exp_v = torch.empty([4, 0], dtype=dtype, device=device)
res_dense_lhs = dense.sparse_mask(x)
sparse = dense.to_sparse(2)
res_sparse_lhs = sparse.sparse_mask(x)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 0]), dtype=dtype, device=device)
self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
# check no side effects for the coalesce flag.
self.assertTrue(sparse.is_coalesced())
self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
_test_sparse_mask_fixed()
self._test_sparse_mask_shape(9, 12, [5, 6], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [50, 30, 20], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 0], [], dtype, device, coalesced)
# check repetitions and matchings in the intersection
lhs = torch.randint(0, 5, (100,), device=device)
rhs = torch.randint(0, 5, (100,), device=device).to_sparse()
self.assertEqual(lhs.to_sparse().sparse_mask(rhs), lhs.sparse_mask(rhs))
|
def _test_sparse_mask_fixed():
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
], device=device)
v = torch.tensor([1, 2, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([5, 4]), dtype=dtype, device=device).coalesce()
dense = torch.tensor([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
[17, 18, 19, 20],
], dtype=dtype, device=device)
exp_v = torch.tensor([7, 14, 3, 20], dtype=dtype, device=device)
res_dense_lhs = dense.sparse_mask(x)
sparse = dense.to_sparse()
res_sparse_lhs = sparse.sparse_mask(x)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4]), dtype=dtype, device=device)
self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
# check no side effects for the coalesce flag.
self.assertTrue(sparse.is_coalesced())
self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
], device=device)
v = torch.empty([4, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([5, 4, 0])).coalesce()
dense = torch.empty([5, 4, 0], dtype=dtype, device=device)
exp_v = torch.empty([4, 0], dtype=dtype, device=device)
res_dense_lhs = dense.sparse_mask(x)
sparse = dense.to_sparse(2)
res_sparse_lhs = sparse.sparse_mask(x)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 0]), dtype=dtype, device=device)
self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
# check no side effects for the coalesce flag.
self.assertTrue(sparse.is_coalesced())
self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
_test_sparse_mask_fixed()
self._test_sparse_mask_shape(9, 12, [5, 6], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [50, 30, 20], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 0], [], dtype, device, coalesced)
# check repetitions and matchings in the intersection
lhs = torch.randint(0, 5, (100,), device=device)
rhs = torch.randint(0, 5, (100,), device=device).to_sparse()
self.assertEqual(lhs.to_sparse().sparse_mask(rhs), lhs.sparse_mask(rhs))
# check coalesce
sparse_c = torch.rand(3, 3, device=device).to_sparse()
sparse_unc = torch.rand(3, 3, device=device).to_sparse()._coalesced_(False)
for lhs, rhs in [(sparse_c, sparse_unc), (sparse_unc, sparse_c)]:
res_all_sparse = lhs.sparse_mask(rhs)
res_dense_sparse = lhs.to_dense().sparse_mask(rhs)
self.assertEqual(res_all_sparse.coalesce(), res_dense_sparse.coalesce())
self.assertEqual(rhs.is_coalesced(), res_all_sparse.is_coalesced())
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_empty_full
|
def test_empty_full(self, device):
all_sparse_dtypes = all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
if torch.cuda.device_count() > 0:
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, None)
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
|
def test_empty_full(self, device, dtype, requires_grad):
if requires_grad and not (dtype.is_floating_point or dtype.is_complex):
self.skipTest(f'requires_grad==True requires float or complex dtype, got {dtype}')
self._test_empty_full(device, dtype, requires_grad)
if torch.cuda.is_available():
self._test_empty_full(None, dtype, requires_grad)
self._test_empty_full(torch.device('cuda:0'), dtype, requires_grad)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
class TestSparse(TestSparseBase):
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
class TestSparse(TestSparseBase):
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_is_sparse
|
def test_is_sparse(self, device):
x = torch.randn(3, 3)
self.assertFalse(x.is_sparse)
x = torch.randn(3, 3, 0)
self.assertFalse(x.is_sparse)
x = self.legacy_sparse_tensor()
self.assertTrue(x.is_sparse)
x = self.sparse_empty(1, 0, device=device)
self.assertTrue(x.is_sparse)
|
def test_is_sparse(self, device):
x = torch.randn(3, 3)
self.assertFalse(x.is_sparse)
x = torch.randn(3, 3, 0)
self.assertFalse(x.is_sparse)
x = self.sparse_empty(1, 0, device=device)
self.assertTrue(x.is_sparse)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
class TestSparse(TestSparseBase):
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
class TestSparse(TestSparseBase):
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
do_test
|
def do_test(t):
y = t.new().resize_as_(t).zero_()
self.assertEqual(y.shape, t.shape)
# Check that y can be added to t. Currently, this requires that
# sparse_dim and dense_dim match.
self.assertEqual(t, t + y)
do_test(self.legacy_sparse_tensor())
do_test(self.sparse_empty([3, 0], device=device))
do_test(self.sparse_empty([3, 3], device=device))
|
def do_test(t):
y = t.new().resize_as_(t).zero_()
self.assertEqual(y.shape, t.shape)
# Check that y can be added to t. Currently, this requires that
# sparse_dim and dense_dim match.
self.assertEqual(t, t + y)
do_test(self.sparse_empty([3, 0], device=device))
do_test(self.sparse_empty([3, 3], device=device))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
check_device
|
def check_device(x, device_id):
self.assertEqual(x.get_device(), device_id)
self.assertEqual(x._values().get_device(), device_id)
self.assertEqual(x._indices().get_device(), device_id)
dev1, dev2 = devices[0], devices[1]
i = self.index_tensor([[2]], device=dev2)
v = torch.tensor([5], device=dev2)
x = self.sparse_tensor(i, v, torch.Size([3]), device=1)
check_device(x, 1)
i = self.index_tensor([[2]], device=dev2)
v = torch.empty(1, 0, device=dev2)
x = self.sparse_tensor(i, v, torch.Size([3, 0]), device=1)
check_device(x, 1)
x = self.sparse_empty(3, device=1)
check_device(x, 1)
x = self.sparse_empty(3, 0, device=1)
check_device(x, 1)
i = self.index_tensor([[2]], device=dev2)
v = torch.tensor([5], device=dev1)
# NB: non-legacy constructor allows this and moves indices
self.assertRaises(RuntimeError, lambda: self.legacy_sparse_tensor(i, v, torch.Size([3])))
i = self.index_tensor([[2]], device=dev2)
v = torch.empty(1, 0, device=dev1)
# NB: non-legacy constructor allows this and moves indices
self.assertRaises(RuntimeError, lambda: self.legacy_sparse_tensor(i, v, torch.Size([3, 0])))
|
def check_device(x, device_id):
self.assertEqual(x.get_device(), device_id)
self.assertEqual(x._values().get_device(), device_id)
self.assertEqual(x._indices().get_device(), device_id)
dev1, dev2 = devices[0], devices[1]
i = self.index_tensor([[2]], device=dev2)
v = torch.tensor([5], device=dev2)
x = self.sparse_tensor(i, v, torch.Size([3]), device=1)
check_device(x, 1)
i = self.index_tensor([[2]], device=dev2)
v = torch.empty(1, 0, device=dev2)
x = self.sparse_tensor(i, v, torch.Size([3, 0]), device=1)
check_device(x, 1)
x = self.sparse_empty(3, device=1)
check_device(x, 1)
x = self.sparse_empty(3, 0, device=1)
check_device(x, 1)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_factory_default
|
def test_factory_default(self, device):
tensor = self.legacy_sparse_tensor()
expected_indices = self.index_tensor([[]], device=device)
expected_size = torch.Size([0])
self.assertEqual(tensor._indices(), expected_indices)
self.assertEqual(tensor.shape, expected_size)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
class TestSparse(TestSparseBase):
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
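The updated context replaces `distutils.version.LooseVersion` with `packaging.version` for the CUDA/HIP version gates. A one-line sketch of the replacement comparison (the version strings are illustrative):

from packaging import version

# version.parse returns comparable Version objects; LooseVersion is deprecated along with distutils.
assert version.parse("11.6") > version.parse("11.2")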
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_tensor
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
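The change above drops the removed `check_sparse_nnz=True` flag in favor of masked semantics: densification goes through `to_dense(masked_grad=...)` and the masked mode is passed to `gradcheck`. A minimal sketch of the new call pattern, assuming a PyTorch build where `Tensor.to_dense` accepts `masked_grad` and `torch.autograd.gradcheck` accepts `masked`, as the diff indicates:

import torch
from torch.autograd import gradcheck

# Small double-precision sparse input; gradcheck needs float64 or complex128.
i = torch.tensor([[0, 1], [0, 1]])
v = torch.tensor([1.0, 2.0], dtype=torch.float64)
x = torch.sparse_coo_tensor(i, v, (2, 2)).coalesce().requires_grad_(True)

def fn(t):
    # masked_grad requests the masked (sparse-aware) backward of to_dense
    return t.to_dense(masked_grad=True)

gradcheck(fn, (x,), masked=True, check_batched_grad=False)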
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_legacy_new_device
|
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, size, device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))
x = torch.sparse_coo_tensor(i, v, size, device='cpu')
self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
if torch.cuda.is_available():
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, size, device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))
x = torch.sparse_coo_tensor(i, v, size, device='cuda')
self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
|
def test_legacy_new_device(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
x = torch.sparse_coo_tensor(i, v, size, device='cpu')
self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
if torch.cuda.is_available():
x = torch.sparse_coo_tensor(i, v, size, device='cuda')
self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
class TestSparse(TestSparseBase):
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_sparse.py
|
test_legacy_constructor
|
def test_legacy_constructor(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor(v.storage()))
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor(v))
self.assertEqual(torch.sparse_coo, torch.sparse.FloatTensor(torch.Size([2, 3])).layout)
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor([6]))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
class TestSparse(TestSparseBase):
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_sparse.py
|
test_meta
|
def test_meta(self, dtype, layout):
if layout is torch.sparse_coo:
self._test_meta_sparse_coo(dtype)
else:
for batchsize, densesize in itertools.product([(), (2,)], [(), (3,)]):
self._test_meta_sparse_compressed(dtype, layout, batchsize, densesize)
|
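The added `test_meta` exercises sparse tensors on the meta device. A minimal sketch of creating one, assuming `torch.empty` accepts `layout=torch.sparse_coo` together with `device='meta'` (the combination the meta-tensor tests rely on):

import torch

# A meta sparse tensor carries only shape/dtype/layout metadata, no storage.
x = torch.empty(4, 4, dtype=torch.float64, layout=torch.sparse_coo, device='meta')
assert x.is_meta and x.layout == torch.sparse_coo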
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseMeta(TestCase):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
test_print_meta
|
def test_print_meta(self, dtype, layout):
printed = []
for batchsize, sparsesize, densesize in itertools.product(
[(), (2,)], [(4, 6), (3, 5, 7)], [(), (3,)]
):
if layout is torch.sparse_coo and batchsize:
# COO tensors don't have batch dimensions
continue
if layout is not torch.sparse_coo and len(sparsesize) != 2:
# CSR/CSC/BSR/BSC tensors must have 2 sparse dimensions
continue
printed += self._test_print_meta_data(dtype, layout, batchsize, sparsesize, densesize)
orig_maxDiff = self.maxDiff
self.maxDiff = None
try:
self.assertExpected('\n'.join(printed))
self.maxDiff = orig_maxDiff
except Exception:
self.maxDiff = orig_maxDiff
raise
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseMeta(TestCase):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
assertEqualAttrs
|
def assertEqualAttrs(x, y, expected_shape):
self.assertEqual(x.shape, expected_shape)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.layout, y.layout)
if not x.is_meta:
self.assertEqual(x.device, y.device)
if x.layout is torch.sparse_coo:
assertEqualAttrs(x._indices(), y._indices(), (*y._indices().shape[:-1], expected_nnz))
assertEqualAttrs(x._values(), y._values(), (expected_nnz, *y._values().shape[1:]))
elif x.layout in {torch.sparse_csr, torch.sparse_bsr}:
assertEqualAttrs(x.crow_indices(), y.crow_indices(), y.crow_indices().shape)
assertEqualAttrs(x.col_indices(), y.col_indices(), (*y.col_indices().shape[:-1], expected_nnz))
batch_dim = x.col_indices().ndim - 1
values_shape = (*y.values().shape[:batch_dim], expected_nnz, *y.values().shape[batch_dim + 1:])
self.assertEqual(x.values().layout, y.values().layout)
self.assertEqual(x.values().dtype, y.values().dtype)
self.assertEqual(x.values().shape, values_shape)
elif x.layout in {torch.sparse_csc, torch.sparse_bsc}:
assertEqualAttrs(x.ccol_indices(), y.ccol_indices(), y.ccol_indices().shape)
assertEqualAttrs(x.row_indices(), y.row_indices(), (*y.row_indices().shape[:-1], expected_nnz))
batch_dim = x.row_indices().ndim - 1
values_shape = (*y.values().shape[:batch_dim], expected_nnz, *y.values().shape[batch_dim + 1:])
self.assertEqual(x.values().layout, y.values().layout)
self.assertEqual(x.values().dtype, y.values().dtype)
self.assertEqual(x.values().shape, values_shape)
|
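The helper above compares layout-specific components: COO exposes `_indices()`/`_values()`, CSR/BSR expose `crow_indices()`/`col_indices()`, and CSC/BSC expose `ccol_indices()`/`row_indices()`. A small illustration of the CSR accessors (the matrix is arbitrary):

import torch

# Compressed row pointers and column indices of a 2x2 CSR tensor with one stored element per row.
d = torch.tensor([[1.0, 0.0], [0.0, 2.0]])
csr = d.to_sparse_csr()
assert csr.crow_indices().tolist() == [0, 1, 2]
assert csr.col_indices().tolist() == [0, 1]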
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
test_sparse_matmul
|
def test_sparse_matmul(self, device, dtype, coalesced):
"""
This function tests `torch.sparse.mm` when both mat1 and mat2 are sparse tensors.
"""
def ref_sparse_mm(a, b):
return a.to_dense() @ b.to_dense()
def grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b):
def test_grad_dense(a_s, b_s, g_s):
a = a_s.to_dense().detach()
b = b_s.to_dense().detach()
g = g_s.to_dense().detach()
a.requires_grad_(True)
b.requires_grad_(True)
c = a @ b
c.backward(g)
return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())
a, _, _ = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, _, _ = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
a.requires_grad_(True)
b.requires_grad_(True)
c = torch.sparse.mm(a, b)
c2 = c.to_dense().detach()
c2 = torch.rand_like(c2)
g = c2.sparse_mask(c.coalesce())
c.backward(g)
a_grad, b_grad = test_grad_dense(a, b, g)
# We convert grad to dense since dense and sparse mm
# implementations handle materialized zeroes differently.
self.assertEqual(a.grad.to_dense(), a_grad.to_dense())
self.assertEqual(b.grad.to_dense(), b_grad.to_dense())
def test_sparse_matmul(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
# dense implementation
r1 = ref_sparse_mm(a, b)
# cpp implementation
r2 = torch.sparse.mm(a, b)
self.assertEqual(r1, r2.to_dense())
# Check result is truly coalesced
self.assertTrue(r2.is_coalesced() and is_coalesced_indices(r2))
if dtype in [torch.double, torch.cdouble]:
a.requires_grad_(True)
b.requires_grad_(True)
# check autograd support on sparse matmul
def fn(D1, D2):
return torch.sparse.mm(D1, D2).to_dense()
if a.is_cuda:
# For CUDA, `nondet_tol` is set to `1e-5` because cuSparse sometimes returns
# approximately-zero values like `~e-323` when chaining `torch.sparse.mm` operations.
# TODO: Check this cuSparse issue.
gradcheck(fn, (a, b), check_sparse_nnz=True, nondet_tol=1e-5)
else:
gradcheck(fn, (a, b), check_sparse_nnz=True)
grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b)
|
def test_sparse_matmul(self, device, dtype, coalesced):
"""
This function tests `torch.sparse.mm` when both mat1 and mat2 are sparse tensors.
"""
def ref_sparse_mm(a, b):
return a.to_dense() @ b.to_dense()
def grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b):
def test_grad_dense(a_s, b_s, g_s):
a = a_s.to_dense().detach()
b = b_s.to_dense().detach()
g = g_s.to_dense().detach()
a.requires_grad_(True)
b.requires_grad_(True)
c = a @ b
c.backward(g)
return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())
a, _, _ = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, _, _ = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
a.requires_grad_(True)
b.requires_grad_(True)
c = torch.sparse.mm(a, b)
c2 = c.to_dense().detach()
c2 = torch.rand_like(c2)
g = c2.sparse_mask(c.coalesce())
c.backward(g)
a_grad, b_grad = test_grad_dense(a, b, g)
# We convert grad to dense since dense and sparse mm
# implementations handle materialized zeroes differently.
self.assertEqual(a.grad.to_dense(), a_grad.to_dense())
self.assertEqual(b.grad.to_dense(), b_grad.to_dense())
def test_sparse_matmul(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
# dense implementation
r1 = ref_sparse_mm(a, b)
# cpp implementation
r2 = torch.sparse.mm(a, b)
self.assertEqual(r1, r2.to_dense())
# Check result is truly coalesced
self.assertTrue(r2.is_coalesced() and is_coalesced_indices(r2))
if dtype in [torch.double, torch.cdouble]:
a.requires_grad_(True)
b.requires_grad_(True)
# check autograd support on sparse matmul
def fn(D1, D2):
return torch.sparse.mm(D1, D2).to_dense()
if a.is_cuda:
# For CUDA, `nondet_tol` is set to `1e-5` because cuSparse sometimes returns
# approximately-zero values like `~e-323` when chaining `torch.sparse.mm` operations.
# TODO: Check this cuSparse issue.
gradcheck(fn, (a, b), nondet_tol=1e-5, masked=True)
else:
gradcheck(fn, (a, b), masked=True)
grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b)
|
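For reference, the invariant the test checks: `torch.sparse.mm` of two sparse COO matrices returns a coalesced sparse result that matches the dense product. A small sketch (the matrices are illustrative):

import torch

a = torch.eye(3, dtype=torch.float64).to_sparse()
b = (2 * torch.eye(3, dtype=torch.float64)).to_sparse()
c = torch.sparse.mm(a, b)
assert c.is_coalesced()
assert torch.equal(c.to_dense(), a.to_dense() @ b.to_dense())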
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
class TestSparse(TestSparseBase):
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
class TestSparse(TestSparseBase):
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
fn
|
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
|
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
fn
|
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
|
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_cuda_sparse_cpu_dense_add
|
def test_cuda_sparse_cpu_dense_add(self):
x = torch.zeros(3, 4, 4)
sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4).cuda(),
[3, 4, 4])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(3, 4, 4, 0)
sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4, 0).cuda(),
[3, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(0, 4, 4, 0)
sparse_y = torch.cuda.sparse.FloatTensor(torch.LongTensor(1, 0).cuda(),
torch.randn(0, 4, 4, 0).cuda(),
[0, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
|
def test_cuda_sparse_cpu_dense_add(self):
x = torch.zeros(3, 4, 4)
sparse_y = torch.sparse_coo_tensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4).cuda(),
[3, 4, 4])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(3, 4, 4, 0)
sparse_y = torch.sparse_coo_tensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4, 0).cuda(),
[3, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(0, 4, 4, 0)
sparse_y = torch.sparse_coo_tensor(torch.empty(1, 0).long().cuda(),
torch.randn(0, 4, 4, 0).cuda(),
[0, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
|
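The updated test constructs its CUDA operand with the public `torch.sparse_coo_tensor` constructor instead of the legacy `torch.cuda.sparse.FloatTensor`. A minimal sketch of the replacement construction (shapes mirror the first case in the test; names are illustrative):

import torch

if torch.cuda.is_available():
    # Indices form a (sparse_dim, nnz) int64 tensor; values carry the dense dimensions.
    i = torch.zeros(1, 4, dtype=torch.int64, device='cuda')
    v = torch.randn(4, 4, 4, device='cuda')
    y = torch.sparse_coo_tensor(i, v, (3, 4, 4))
    assert y.is_cuda and y.layout == torch.sparse_coo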
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
class TestSparseOneOff(TestCase):
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
class TestSparseOneOff(TestCase):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
_sparse_to_dense
|
def _sparse_to_dense(tensor):
if tensor.dtype != torch.bool:
return tensor.to_dense()
# to_dense uses coalesce which isn't implemented for bool
return tensor.to(torch.int8).to_dense().to(torch.bool)
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseUnaryUfuncs(TestCase):
exact_dtype = True
@_sparse_unary_ops
def test_sparse_consistency(self, device, dtype, op):
sample = first_sample(self, op.sample_inputs(device, dtype))
assert isinstance(sample.input, torch.Tensor)
expected = op(sample.input, *sample.args, **sample.kwargs)
assert torch.is_tensor(expected)
output = op(sample.input.to_sparse(), *sample.args, **sample.kwargs)
assert torch.is_tensor(output)
self.assertEqual(_sparse_to_dense(output), expected)
@_sparse_unary_ops
def test_out(self, device, dtype, op):
if not op.supports_out:
self.skipTest("Skipped! Out not supported")
sample = first_sample(self, op.sample_inputs(device, dtype))
sample.input = sample.input.to_sparse()
expect = op(sample.input, *sample.args, **sample.kwargs)
out = torch.zeros(sample.input.shape, device=device,
dtype=expect.dtype, layout=torch.sparse_coo)
op(sample.input, *sample.args, **sample.kwargs, out=out)
self.assertEqual(out, expect)
@_sparse_unary_ops
def test_inplace(self, device, dtype, op):
if op.inplace_variant is None:
self.skipTest("Skipped! Out not supported")
sample = first_sample(self, op.sample_inputs(device, dtype))
sample.input = sample.input.to_sparse().coalesce()
expect = op(sample.input, *sample.args, **sample.kwargs)
if not torch.can_cast(expect.dtype, dtype):
with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
return
actual = op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
self.assertIs(actual, sample.input)
self.assertEqual(actual, expect)
@_sparse_unary_ops
def test_sparse_zero_dims(self, device, dtype, op):
# test 0x0 sparse_coo_tensor
indices = torch.empty(2, 0, dtype=torch.int64)
values = torch.empty(0, dtype=dtype)
sparse_0x0 = torch.sparse_coo_tensor(indices, values, (0, 0))
expected = torch.sparse_coo_tensor(indices, op(values), (0, 0))
actual = op(sparse_0x0)
self.assertEqual(expected, actual)
@_sparse_unary_ops
def test_sparse_zeros(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
zero_input = torch.zeros((), device=device, dtype=dtype)
sparse_input = torch.zeros((), dtype=dtype, device=device,
layout=torch.sparse_coo)
expect = op(zero_input)
actual = op(sparse_input)
self.assertEqual(expect, _sparse_to_dense(actual))
@ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=[torch.double, torch.cdouble])
def test_sparse_fn_grad(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Op doesn't support autograd")
for sample in op.sample_inputs(device, dtype):
sparse_input = sample.input.to_sparse().detach().requires_grad_(True)
def fn(x):
return _sparse_to_dense(
op(x, *sample.args, **sample.kwargs))
self.assertTrue(gradcheck(
fn,
(sparse_input,),
check_batched_grad=False,
check_grad_dtypes=True,
check_sparse_nnz=True,
nondet_tol=op.gradcheck_nondet_tol,
fast_mode=op.gradcheck_fast_mode))
class TestSparseMaskedReductions(TestCase):
exact_dtype = True
@ops(sparse_masked_reduction_ops)
def test_future_empty_dim(self, device, dtype, op):
"""Currently, `dim=()` in reductions operations means "reduce over
all dimensions" while in future, it will read "no reduce". See
https://github.com/pytorch/pytorch/issues/29137
For sparse masked reductions, we'll implement the current behavior.
For testing, we'll use samples with `dim=0` and map it to
`dim=()` until
torch.testing._internal.common_methods_invocations._generate_reduction_kwargs
is made to generate samples with `dim=()` for non-scalar
inputs. With this and after gh-29137 is resolved, this test
can be deleted. See also `torch.masked._canonical_dim`
implementation about changing the `dim=()` behavior.
"""
samples = op.sample_inputs_func(op, device, dtype, requires_grad=False)
op_name = op.name.replace('masked.', '')
for sample_input in samples:
if sample_input.kwargs.get('dim') != 0:
continue
sample_input_kwargs = dict(sample_input.kwargs)
sample_input_kwargs['dim'] = () # reduce over all dimensions
t = sample_input.input
mask = sample_input_kwargs.get('mask')
if mask is None and op_name in {'prod', 'amax', 'amin'}:
# FIXME: for now reductions with non-zero reduction identity and
# unspecified mask are not supported for sparse COO
# tensors, see torch.masked.prod implementation
# for details.
continue
sparse_op_kwargs = dict(sample_input_kwargs)
actual = op(t.to_sparse(), *sample_input.args, **sample_input_kwargs)
self.assertEqual(actual.layout, torch.sparse_coo)
expected = op(t, *sample_input.args, **sample_input_kwargs).to_sparse()
self.assertEqual(actual, expected)
class TestSparseMeta(TestCase):
exact_dtype = True
def test_basic(self):
r = torch.empty(4, 4, layout=torch.sparse_coo, device='meta')
self.assertTrue(r.is_meta)
self.assertEqual(r.device.type, "meta")
r2 = torch.empty_like(r)
self.assertTrue(r2.is_meta)
self.assertEqual(r, r2)
r3 = torch.sparse_coo_tensor(size=(4, 4), device='meta')
self.assertTrue(r3.is_meta)
self.assertEqual(r, r3)
r.sparse_resize_((4, 4), 1, 1)
r.sparse_resize_and_clear_((4, 4, 4), 2, 1)
self.assertEqual(r.sparse_dim(), 2)
self.assertEqual(r.dense_dim(), 1)
self.assertEqual(r._dimV(), 1)
self.assertEqual(r._nnz(), 0)
# nnz zero sparse tensors should always be coalesced at creation
self.assertEqual(r.is_coalesced(), True)
# but we can force them into the uncoalesced state
r._coalesced_(False)
self.assertEqual(r.is_coalesced(), False)
# return the coalesced state for indices/values access
r._coalesced_(True)
# TODO: this sort of aliasing will need to be handled by
# functionalization
self.assertEqual(r._indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r._values(), torch.empty(0, 4, device='meta'))
self.assertEqual(r.indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r.values(), torch.empty(0, 4, device='meta'))
class TestSparseAny(TestCase):
@onlyCPU
@all_sparse_layouts('layout', include_strided=False)
@torch.sparse.check_sparse_tensor_invariants(enable=False)
def test_check_sparse_tensor_invariants(self, layout):
if layout is torch.sparse_coo:
def create_invalid_tensor(check_invariants=None):
shape = (2, 2)
invalid_indices = torch.tensor([[0], [3]]) # column index is out of range
values = torch.tensor([1])
if check_invariants is None:
return torch.sparse_coo_tensor(invalid_indices, values, shape)
else:
return torch.sparse_coo_tensor(invalid_indices, values, shape, check_invariants=check_invariants)
expected_exception_message = 'size is inconsistent with indices: for dim 1, size is 2 but found index 3'
elif layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
def create_invalid_tensor(check_invariants=None):
shape = (2, 2)
compressed_indices = torch.tensor([0, 0, 1])
invalid_plain_indices = torch.tensor([3]) # index is out of range
if layout in {torch.sparse_bsr, torch.sparse_bsc}:
values = torch.tensor([[[1]]])
else:
values = torch.tensor([1])
if check_invariants is None:
return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, layout=layout)
else:
return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, layout=layout,
check_invariants=check_invariants)
if layout in {torch.sparse_csr, torch.sparse_bsr}:
expected_exception_message = r'`0 <= col_indices < ncols` is not satisfied.'
else:
expected_exception_message = r'`0 <= row_indices < nrows` is not satisfied.'
else:
raise NotImplementedError(layout)
# First, consider the case where invariant checks are disabled
# "globally" (read: within the context of this test method
# caller) as defined by check_sparse_tensor_invariants(False)
# decorator:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Enable the invariant checks in a local context:
with torch.sparse.check_sparse_tensor_invariants():
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Leaving the local context must restore the "global" state of
# the invariant check feature:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Since invariant checks are disabled by default, we can
# create an invalid sparse tensor without raising an
# exception:
r = create_invalid_tensor()
self.assertEqual(r.layout, layout)
# Or, when disabling the invariants check explicitly:
r = create_invalid_tensor(check_invariants=False)
self.assertEqual(r.layout, layout)
# Enabling invariant check via constructor's optional argument
# will raise an exception when sparse tensor invariants are
# violated:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor(check_invariants=True)
# Check that the global invariant check flag has been restored
# after raising the exception above:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Next, consider the case where invariant checks are enabled
# within a local context:
with torch.sparse.check_sparse_tensor_invariants():
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Since invariant checks are now enabled by default, an
# attempt to create an invalid sparse tensor will lead to
# an exception:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor()
# Similarly, when enabling the invariant checks
# explicitly, invalid sparse tensor construction will lead
# to an exception:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor(check_invariants=True)
# However, invariants check can be disabled via
# constructor's optional argument so that the invalid
# tensor is successfully constructed:
r = create_invalid_tensor(check_invariants=False)
self.assertEqual(r.layout, layout)
# Check that the invariant check flag has been restored
# when leaving the constructor:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Double-check restoring the global state when leaving the
# local context:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
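# A minimal usage sketch of the invariant-check API exercised above (comments
# only; indices/values/shape are hypothetical placeholders):
#   with torch.sparse.check_sparse_tensor_invariants():
#       torch.sparse_coo_tensor(indices, values, shape)  # raises on invalid input
#   torch.sparse_coo_tensor(indices, values, shape, check_invariants=True)  # per-call opt-in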
def test_generate_simple_inputs(self):
layouts = [torch.strided, torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc]
tested_combinations = set()
for tensors in zip(*map(self.generate_simple_inputs, layouts)):
for i, t in enumerate(tensors):
self.assertEqual(t.layout, layouts[i])
# all layouts must produce semantically the same tensors
self.assertEqual(t, tensors[0])
if t.layout is torch.strided:
is_hybrid = None
else:
is_hybrid = t.dense_dim() > 0
if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
is_batch = t.crow_indices().ndim > 1
elif t.layout in {torch.sparse_csc, torch.sparse_bsc}:
is_batch = t.ccol_indices().ndim > 1
else:
is_batch = None
if t.layout in {torch.sparse_bsr, torch.sparse_bsc}:
blocksize = t.values().shape[1:3]
nontrivial_blocksize = 1 not in blocksize
else:
nontrivial_blocksize = None
if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
contiguous_indices = t.crow_indices().is_contiguous() and t.col_indices().is_contiguous()
contiguous_values = t.values().is_contiguous()
elif t.layout in {torch.sparse_csc, torch.sparse_bsc}:
contiguous_indices = t.ccol_indices().is_contiguous() and t.row_indices().is_contiguous()
contiguous_values = t.values().is_contiguous()
elif t.layout is torch.sparse_coo:
contiguous_indices = t._indices().is_contiguous()
contiguous_values = t._values().is_contiguous()
else:
contiguous_indices = None
contiguous_values = t.is_contiguous()
tested_combinations.add((t.layout, is_hybrid, is_batch, nontrivial_blocksize,
contiguous_indices, contiguous_values))
# Ensure that the inputs generation covers all layout,
# non-hybrid/hybrid, non-batch/batch, and contiguity
# combinations:
untested_combinations = set()
for layout in layouts:
for is_hybrid in [False, True]:
if layout is torch.strided:
is_hybrid = None
for is_batch in [False, True]:
if layout in {torch.sparse_coo, torch.strided}:
is_batch = None
for nontrivial_blocksize in [False, True]:
if layout not in {torch.sparse_bsr, torch.sparse_bsc}:
nontrivial_blocksize = None
for contiguous_indices in [False, True]:
if layout is torch.strided:
contiguous_indices = None
elif not is_batch:
# indices are contiguous per-patch
contiguous_indices = True
for contiguous_values in [False, True]:
key = (layout, is_hybrid, is_batch, nontrivial_blocksize,
contiguous_indices, contiguous_values)
if key not in tested_combinations:
untested_combinations.add(
f'layout={layout}, is_hybrid={is_hybrid}, is_batch={is_batch},'
f' nontrivial_blocksize={nontrivial_blocksize},'
f' contiguous_indices={contiguous_indices}, contiguous_values={contiguous_values}')
assert not untested_combinations, untested_combinations
@all_sparse_layouts('from_layout', include_strided=False)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@parametrize("index_dtype", [torch.int32, torch.int64])
def test_to_dense(self, from_layout, device, dtype, index_dtype):
"""
This test tests conversion from any layout to strided layout.
"""
for t in self.generate_simple_inputs(
from_layout, device=device, dtype=dtype, index_dtype=index_dtype):
r = t.to_dense()
self.assertEqual(r.layout, torch.strided)
self.assertEqual(r, t)
@all_sparse_layouts('from_layout', include_strided=True)
@all_sparse_layouts('to_layout', include_strided=False)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@parametrize("index_dtype", [torch.int32, torch.int64])
def test_to_sparse(self, from_layout, to_layout, device, dtype, index_dtype):
"""
This test tests conversion from any layout to any sparse layout.
"""
for t in self.generate_simple_inputs(
from_layout, device=device, dtype=dtype, index_dtype=index_dtype,
enable_hybrid=(
# TODO: to support conversion strided->hybrid
# CSR/CSC/BSR/BSC, to_sparse() requires extra keyword
# argument, either nof_batch_dims or
# nof_dense_dims
not (from_layout is torch.strided and to_layout in
{torch.sparse_bsr, torch.sparse_bsc, torch.sparse_csr, torch.sparse_csc}))):
if to_layout in {torch.sparse_bsr, torch.sparse_bsc}:
if from_layout == torch.sparse_bsr:
batch_ndim = t.crow_indices().dim() - 1
blocksize = t.values().shape[batch_ndim + 1:batch_ndim + 3]
elif from_layout == torch.sparse_bsc:
batch_ndim = t.ccol_indices().dim() - 1
blocksize = t.values().shape[batch_ndim + 1:batch_ndim + 3]
else:
blocksize = (1, 1)
else:
blocksize = None
if from_layout is torch.strided:
is_batch = None
is_hybrid = None
else:
is_batch = t.dim() > (t.sparse_dim() + t.dense_dim())
is_hybrid = t.dense_dim() > 0
def explicit_to_sparse(x):
# Used to check that the explicit conversion methods
# are consistent with the `to_sparse(*, layout,
# blocksize)` method.
if to_layout is torch.sparse_coo:
return x.to_sparse_coo()
elif to_layout is torch.sparse_csr:
return x.to_sparse_csr()
elif to_layout is torch.sparse_csc:
return x.to_sparse_csc()
elif to_layout is torch.sparse_bsr:
return x.to_sparse_bsr(blocksize)
elif to_layout is torch.sparse_bsc:
return x.to_sparse_bsc(blocksize)
else:
assert 0 # unreachable
# TODO: The following exception cases all correspond to
# not implemented conversions
if from_layout in {
torch.sparse_csr, torch.sparse_csc} and to_layout in {torch.sparse_bsr, torch.sparse_bsc} and is_batch:
with self.assertRaisesRegex(RuntimeError,
r"conversion from (Csr|Csc) to (Bsr|Bsc) for batched inputs is not implemented"):
t.to_sparse(layout=to_layout, blocksize=blocksize)
with self.assertRaisesRegex(RuntimeError,
r"conversion from (Csr|Csc) to (Bsr|Bsc) for batched inputs is not implemented"):
explicit_to_sparse(t)
continue
elif from_layout is torch.sparse_coo and to_layout in {
torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} and t.sparse_dim() != 2:
with self.assertRaisesRegex(
RuntimeError, "Only tensors with two sparse dimensions can be converted to the Sparse(Csr|Csc) layout"):
t.to_sparse(layout=to_layout, blocksize=blocksize)
with self.assertRaisesRegex(
RuntimeError, "Only tensors with two sparse dimensions can be converted to the Sparse(Csr|Csc) layout"):
explicit_to_sparse(t)
continue
elif from_layout in {torch.sparse_csr, torch.sparse_csc,
torch.sparse_bsr, torch.sparse_bsc} and to_layout is torch.sparse_coo and is_batch:
with self.assertRaisesRegex(RuntimeError,
"crow_indices is supposed to be a vector, but got \\d+ dimensional tensor"):
t.to_sparse(layout=to_layout, blocksize=blocksize)
with self.assertRaisesRegex(RuntimeError,
"crow_indices is supposed to be a vector, but got \\d+ dimensional tensor"):
explicit_to_sparse(t)
continue
elif (from_layout, to_layout) in {(torch.sparse_bsc, torch.sparse_csr), (torch.sparse_bsc, torch.sparse_csc),
(torch.sparse_bsr, torch.sparse_csr), (torch.sparse_bsr, torch.sparse_csc)}:
with self.assertRaisesRegex(
RuntimeError,
r"sparse_compressed_to_sparse_(csr|csc|bsr|bsc) expected\s*(Sparse(Csc|Csr)[,]|)\s*Sparse(Csr|Bsr)"
" or Sparse(Csc|Bsc) layout but got Sparse(Csr|Csc|Bsr|Bsc)"):
t.to_sparse(layout=to_layout, blocksize=blocksize)
with self.assertRaisesRegex(
RuntimeError,
r"sparse_compressed_to_sparse_(csr|csc|bsr|bsc) expected\s*(Sparse(Csc|Csr)[,]|)\s*Sparse(Csr|Bsr)"
" or Sparse(Csc|Bsc) layout but got Sparse(Csr|Csc|Bsr|Bsc)"):
explicit_to_sparse(t)
self.skipTest('NOT IMPL')
else:
r = t.to_sparse(layout=to_layout, blocksize=blocksize)
self.assertEqual(r.layout, to_layout)
# to_sparse method uses unsafe construction of sparse
# tensors. Here we explicitly validate the results to
# make sure that the sparse tensors are consistent
# with the corresponding sparse tensor invariants.
if r.layout in {torch.sparse_csr, torch.sparse_bsr, torch.sparse_csc, torch.sparse_bsc}:
if r.layout in {torch.sparse_csr, torch.sparse_bsr}:
compressed_indices, plain_indices = r.crow_indices(), r.col_indices()
else:
compressed_indices, plain_indices = r.ccol_indices(), r.row_indices()
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, r.values(),
r.shape, r.layout)
if from_layout in {torch.strided, torch.sparse_coo}:
self.assertEqual(compressed_indices.dtype, torch.int64)
self.assertEqual(plain_indices.dtype, torch.int64)
else:
self.assertEqual(compressed_indices.dtype, index_dtype)
self.assertEqual(plain_indices.dtype, index_dtype)
self.assertEqual(r.values().dtype, dtype)
elif r.layout is torch.sparse_coo:
if t.layout is torch.sparse_coo:
self.assertEqual(t.is_coalesced(), r.is_coalesced())
# Check r is truly coalesced when r.is_coalesced == True
if r.is_coalesced():
self.assertTrue(is_coalesced_indices(r))
torch._validate_sparse_coo_tensor_args(r._indices(), r._values(), r.shape)
self.assertEqual(r._indices().dtype, torch.int64)
self.assertEqual(r._values().dtype, dtype)
else:
assert 0 # unreachable
# Finally, we'll test tensor equality:
self.assertEqual(r, t)
# Also, check consistency with explicit conversion methods:
r2 = explicit_to_sparse(t)
self.assertEqual(r2, r)
# Check inverse conversion from sparse compressed block tensors
if from_layout == torch.sparse_bsr:
batch_ndim = t.crow_indices().dim() - 1
from_blocksize = t.values().shape[batch_ndim + 1:batch_ndim + 3]
elif from_layout == torch.sparse_bsc:
batch_ndim = t.ccol_indices().dim() - 1
from_blocksize = t.values().shape[batch_ndim + 1:batch_ndim + 3]
else:
continue
if r.ndim != 2:
continue
t2 = r.to_sparse(layout=from_layout, blocksize=from_blocksize)
self.assertEqual(t2, t)
# extra tests
if (from_layout, to_layout) == (torch.sparse_csr, torch.sparse_bsr):
# See gh-90910
t = torch.tensor([[0, 0, 1, 0], [0, 1, 0, 0]], dtype=dtype, device=device).to_sparse_csr()
r = t.to_sparse_bsr((2, 2))
torch._validate_sparse_compressed_tensor_args(r.crow_indices(), r.col_indices(), r.values(), r.shape, r.layout)
self.assertEqual(r, t)
if (from_layout, to_layout) in {(torch.sparse_csr, torch.sparse_csc),
(torch.sparse_csc, torch.sparse_csr)}:
# See gh-91007
compressed_indices = torch.tensor([0, 4, 8, 8, 12, 16, 20], dtype=index_dtype, device=device)
plain_indices = torch.tensor([0, 1, 2, 3] * 5, dtype=index_dtype, device=device)
t = torch.sparse_compressed_tensor(compressed_indices, plain_indices, range(20),
dtype=dtype, device=device, layout=from_layout)
r = t.to_sparse(layout=to_layout)
if r.layout in {torch.sparse_csr, torch.sparse_bsr}:
compressed_indices, plain_indices = r.crow_indices(), r.col_indices()
else:
compressed_indices, plain_indices = r.ccol_indices(), r.row_indices()
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, r.values(), r.shape, r.layout)
self.assertEqual(r, t)
@onlyNativeDeviceTypes
@suppress_warnings
@ops(reduction_ops_with_sparse_support)
@precisionOverride({torch.bfloat16: 5e-4, torch.float16: 5e-3})
@all_sparse_layouts('layout', include_strided=False)
def test_reductions(self, layout, device, dtype, op):
count = 0
for sample in op.sample_inputs_sparse(layout, device, dtype):
count += 1
t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
result = op.op(t_inp, *t_args, **t_kwargs)
# Checking invariant rop(inp, ...).to_dense() == rop(inp.to_dense(), ...)
dense = op.op(t_inp.to_dense(), *t_args, **t_kwargs)
self.assertEqual(result, dense)
if count == 0:
# we count samples to avoid false-positive test reports
self.skipTest('no sample inputs')
@onlyNativeDeviceTypes
@suppress_warnings
@ops(reduction_ops_with_sparse_support, allowed_dtypes=(torch.float32, torch.float64, torch.complex64, torch.complex128))
@all_sparse_layouts('layout', include_strided=False)
def test_reductions_backward(self, layout, device, dtype, op):
count = 0
for sample in op.sample_inputs_sparse(layout, device, dtype, requires_grad=True):
t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
r = op.op(t_inp, *t_args, **t_kwargs)
if r.numel() != 0:
r = r.sum()
if op.name == 'sum':
count += 1
r.backward()
self.assertEqual(t_inp.grad, torch.ones(t_inp.shape, dtype=dtype, device=device))
else:
self.skipTest('NOT IMPL')
if count == 0:
# we count samples to avoid false-positive test reports
self.skipTest('no sample inputs')
@onlyNativeDeviceTypes
@suppress_warnings
@parametrize("mth", [subtest(mth, name=mth.__name__)
for mth in [torch.Tensor.is_coalesced,
torch.Tensor.coalesce,
torch.Tensor.indices,
torch.Tensor.values,
torch.Tensor.crow_indices,
torch.Tensor.col_indices,
torch.Tensor.ccol_indices,
torch.Tensor.row_indices,
]])
@all_sparse_layouts('layout', include_strided=True)
def test_unsupported_backend_error_message(self, mth, layout, device):
inp = torch.tensor([[1, 2], [3, 4]], device=device).to_sparse(
layout=layout,
blocksize=(1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None)
assert inp.layout is layout
expected_behaviour = dict(
# <mth name> = (<supported layouts>, <exception message on other layouts>)
is_coalesced=({torch.sparse_coo},
"is_coalesced expected sparse coordinate tensor layout but got (Sparse(Csr|Csc|Bsr|Bsc)|Strided)"),
coalesce=({torch.sparse_coo},
"coalesce expected sparse coordinate tensor layout but got (Sparse(Csr|Csc|Bsr|Bsc)|Strided)"),
indices=({torch.sparse_coo},
"indices expected sparse coordinate tensor layout but got (Sparse(Csr|Csc|Bsr|Bsc)|Strided)"),
values=({torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc},
"values expected sparse tensor layout but got Strided"),
crow_indices=({torch.sparse_csr, torch.sparse_bsr},
"crow_indices expected sparse row compressed tensor layout but got (Sparse(Csc|Bsc|)|Strided)"),
col_indices=({torch.sparse_csr, torch.sparse_bsr},
"col_indices expected sparse row compressed tensor layout but got (Sparse(Csc|Bsc|)|Strided)"),
ccol_indices=({torch.sparse_csc, torch.sparse_bsc},
"ccol_indices expected sparse column compressed tensor layout but got (Sparse(Csr|Bsr|)|Strided)"),
row_indices=({torch.sparse_csc, torch.sparse_bsc},
"row_indices expected sparse column compressed tensor layout but got (Sparse(Csr|Bsr|)|Strided)"),
)[mth.__name__]
if layout in expected_behaviour[0]:
mth(inp)
else:
with self.assertRaisesRegex(RuntimeError, expected_behaviour[1]):
mth(inp)
# e.g., TestSparseUnaryUfuncsCPU and TestSparseUnaryUfuncsCUDA
instantiate_device_type_tests(TestSparseUnaryUfuncs, globals(), except_for='meta')
instantiate_device_type_tests(TestSparseMaskedReductions, globals(), except_for='meta')
# e.g., TestSparseCPU and TestSparseCUDA
instantiate_device_type_tests(TestSparse, globals(), except_for='meta')
instantiate_device_type_tests(TestSparseAny, globals(), except_for='meta')
if __name__ == '__main__':
run_tests()
|
def _sparse_to_dense(tensor):
if tensor.dtype != torch.bool:
return tensor.to_dense(masked_grad=True)
# to_dense uses coalesce which isn't implemented for bool
return tensor.to(torch.int8).to_dense().to(torch.bool)
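# Hedged note on the change above (based on this record's diff): to_dense() now
# accepts a masked_grad keyword; with masked_grad=True the backward pass follows
# masked semantics, i.e. gradients are produced only for the specified elements
# of the sparse input.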
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseUnaryUfuncs(TestCase):
exact_dtype = True
@_sparse_unary_ops
def test_sparse_consistency(self, device, dtype, op):
sample = first_sample(self, op.sample_inputs(device, dtype))
assert isinstance(sample.input, torch.Tensor)
expected = op(sample.input, *sample.args, **sample.kwargs)
assert torch.is_tensor(expected)
output = op(sample.input.to_sparse(), *sample.args, **sample.kwargs)
assert torch.is_tensor(output)
self.assertEqual(_sparse_to_dense(output), expected)
@_sparse_unary_ops
def test_out(self, device, dtype, op):
if not op.supports_out:
self.skipTest("Skipped! Out not supported")
sample = first_sample(self, op.sample_inputs(device, dtype))
sample.input = sample.input.to_sparse()
expect = op(sample.input, *sample.args, **sample.kwargs)
out = torch.sparse_coo_tensor(sample.input.shape, device=device,
dtype=expect.dtype)
op(sample.input, *sample.args, **sample.kwargs, out=out)
self.assertEqual(out, expect)
@_sparse_unary_ops
def test_inplace(self, device, dtype, op):
if op.inplace_variant is None:
self.skipTest("Skipped! Out not supported")
sample = first_sample(self, op.sample_inputs(device, dtype))
sample.input = sample.input.to_sparse().coalesce()
expect = op(sample.input, *sample.args, **sample.kwargs)
if not torch.can_cast(expect.dtype, dtype):
with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
return
actual = op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
self.assertIs(actual, sample.input)
self.assertEqual(actual, expect)
@_sparse_unary_ops
def test_sparse_zero_dims(self, device, dtype, op):
# test 0x0 sparse_coo_tensor
indices = torch.empty(2, 0, dtype=torch.int64)
values = torch.empty(0, dtype=dtype)
sparse_0x0 = torch.sparse_coo_tensor(indices, values, (0, 0))
expected = torch.sparse_coo_tensor(indices, op(values), (0, 0))
actual = op(sparse_0x0)
self.assertEqual(expected, actual)
@_sparse_unary_ops
def test_sparse_zeros(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
zero_input = torch.zeros((), device=device, dtype=dtype)
sparse_input = torch.sparse_coo_tensor((), dtype=dtype, device=device)
expect = op(zero_input)
actual = op(sparse_input)
self.assertEqual(expect, _sparse_to_dense(actual))
@ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=[torch.double, torch.cdouble])
def test_sparse_fn_grad(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Op doesn't support autograd")
for sample in op.sample_inputs(device, dtype):
sparse_input = sample.input.to_sparse().detach().requires_grad_(True)
def fn(x):
return _sparse_to_dense(
op(x, *sample.args, **sample.kwargs))
self.assertTrue(gradcheck(
fn,
(sparse_input,),
check_batched_grad=False,
check_grad_dtypes=True,
nondet_tol=op.gradcheck_nondet_tol,
fast_mode=op.gradcheck_fast_mode,
masked=True))
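# Hedged note: masked=True above asks gradcheck to use masked semantics for the
# sparse input (unspecified elements are treated as masked out), replacing the
# older check_sparse_nnz=True flag used in the previous version of this test.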
class TestSparseMaskedReductions(TestCase):
exact_dtype = True
fp16_low_precision_list = {
'masked.prod',
}
@ops(sparse_masked_reduction_ops)
def test_future_empty_dim(self, device, dtype, op):
"""Currently, `dim=()` in reductions operations means "reduce over
all dimensions" while in future, it will read "no reduce". See
https://github.com/pytorch/pytorch/issues/29137
For sparse masked reductions, we'll implement the current behavior.
For testing, we'll use samples with `dim=0` and map it to
`dim=()` until
torch.testing._internal.common_methods_invocations._generate_reduction_kwargs
is made to generate samples with `dim=()` for non-scalar
inputs. With this and after gh-29137 is resolved, this test
can be deleted. See also `torch.masked._canonical_dim`
implementation about changing the `dim=()` behavior.
"""
samples = op.sample_inputs_func(op, device, dtype, requires_grad=False)
op_name = op.name.replace('masked.', '')
for sample_input in samples:
if sample_input.kwargs.get('dim') != 0:
continue
sample_input_kwargs = dict(sample_input.kwargs)
sample_input_kwargs['dim'] = () # reduce over all dimensions
t = sample_input.input
mask = sample_input_kwargs.get('mask')
if mask is None and op_name in {'prod', 'amax', 'amin'}:
# FIXME: for now reductions with non-zero reduction identity and
# unspecified mask are not supported for sparse COO
# tensors, see torch.masked.prod implementation
# for details.
continue
sparse_op_kwargs = dict(sample_input_kwargs)
actual = op(t.to_sparse(), *sample_input.args, **sample_input_kwargs)
self.assertEqual(actual.layout, torch.sparse_coo)
expected = op(t, *sample_input.args, **sample_input_kwargs).to_sparse()
atol = None
rtol = None
if op.name in self.fp16_low_precision_list and dtype == torch.half:
atol = 1e-5
rtol = 2e-3
self.assertEqual(actual, expected, atol=atol, rtol=rtol)
class TestSparseMeta(TestCase):
exact_dtype = True
def _test_meta_sparse_coo(self, dtype):
r = torch.empty(4, 4, layout=torch.sparse_coo, device='meta', dtype=dtype)
self.assertTrue(r.is_meta)
self.assertEqual(r.device.type, "meta")
r2 = torch.empty_like(r)
self.assertTrue(r2.is_meta)
self.assertEqual(r, r2)
r3 = torch.sparse_coo_tensor(size=(4, 4), device='meta', dtype=dtype)
self.assertTrue(r3.is_meta)
self.assertEqual(r, r3)
r.sparse_resize_((4, 4), 1, 1)
r.sparse_resize_and_clear_((4, 4, 4), 2, 1)
self.assertEqual(r.sparse_dim(), 2)
self.assertEqual(r.dense_dim(), 1)
self.assertEqual(r._dimV(), 1)
self.assertEqual(r._nnz(), 0)
# nnz zero sparse tensors should always be coalesced at creation
self.assertEqual(r.is_coalesced(), True)
# but we can force them into the uncoalesced state
r._coalesced_(False)
self.assertEqual(r.is_coalesced(), False)
# return the coalesced state for indices/values access
r._coalesced_(True)
# TODO: this sort of aliasing will need to be handled by
# functionalization
self.assertEqual(r._indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r._values(), torch.empty(0, 4, device='meta', dtype=dtype))
self.assertEqual(r.indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r.values(), torch.empty(0, 4, device='meta', dtype=dtype))
def _test_meta_sparse_compressed(self, dtype, layout, batchsize, densesize):
index_dtype = torch.int64
blocksize = (2, 3) if layout in {torch.sparse_bsr, torch.sparse_bsc} else ()
sparsesize = (4, 6)
nnz = 0
shape = (*batchsize, *sparsesize, *densesize)
compressed_dim = 0 if layout in {torch.sparse_csr, torch.sparse_bsr} else 1
nof_compressed_indices = (sparsesize[compressed_dim] // blocksize[compressed_dim] + 1 if blocksize
else sparsesize[compressed_dim] + 1)
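# Worked example of the formula above (illustrative): with sparsesize=(4, 6) and
# blocksize=(2, 3) for BSR, compressed_dim=0 gives 4 // 2 + 1 = 3 compressed
# indices; for CSR (no blocksize) it gives 4 + 1 = 5.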
compressed_indices = torch.empty((*batchsize, nof_compressed_indices), device='meta', dtype=index_dtype)
plain_indices = torch.empty((*batchsize, nnz), device='meta', dtype=index_dtype)
values = torch.empty((*batchsize, nnz, *blocksize, *densesize), device='meta', dtype=dtype)
r = torch.sparse_compressed_tensor(
compressed_indices,
plain_indices,
values,
shape,
layout=layout
)
self.assertTrue(r.is_meta)
self.assertEqual(r.device.type, "meta")
self.assertEqual(r.sparse_dim(), 2)
self.assertEqual(r.dense_dim(), len(densesize))
self.assertEqual(r._nnz(), nnz)
batch_dims = r.ndim - r.sparse_dim() - r.dense_dim()
r_blocksize = r.values().shape[batch_dims + 1: batch_dims + 1 + len(blocksize)]
self.assertEqual(r_blocksize, blocksize)
r_compressed_indices = r.crow_indices() if layout in {torch.sparse_csr, torch.sparse_bsr} else r.ccol_indices()
r_plain_indices = r.col_indices() if layout in {torch.sparse_csr, torch.sparse_bsr} else r.row_indices()
self.assertEqual(r_compressed_indices,
torch.empty((*batchsize, nof_compressed_indices), device='meta', dtype=index_dtype))
self.assertEqual(r_plain_indices, torch.empty((*batchsize, nnz), device='meta', dtype=index_dtype))
self.assertEqual(r.values(), torch.empty((*batchsize, nnz, *blocksize, *densesize), device='meta', dtype=dtype))
r2 = torch.empty_like(r)
self.assertTrue(r2.is_meta)
self.assertEqual(r2, r)
if layout in {torch.sparse_csr, torch.sparse_csc}:
r3 = torch.empty((*batchsize, *sparsesize), dtype=dtype, layout=layout, device="meta")
self.assertTrue(r3.is_meta)
if not densesize:
# dense dimensions cannot be specified for torch.empty
self.assertEqual(r3, r)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_meta(self, dtype, layout):
if layout is torch.sparse_coo:
self._test_meta_sparse_coo(dtype)
else:
for batchsize, densesize in itertools.product([(), (2,)], [(), (3,)]):
self._test_meta_sparse_compressed(dtype, layout, batchsize, densesize)
def _test_print_meta_data(self, dtype, layout, batchsize, sparsesize, densesize):
index_dtype = torch.int64
nnz = 0
blocksize = (2, 3) if layout in {torch.sparse_bsr, torch.sparse_bsc} else ()
shape = (*batchsize, *sparsesize, *densesize)
values = torch.empty((*batchsize, nnz, *blocksize, *densesize), device='meta', dtype=dtype)
if layout is torch.sparse_coo:
indices = torch.empty((len(sparsesize), nnz), device='meta', dtype=index_dtype)
x = torch.sparse_coo_tensor(indices, values, shape)
else:
compressed_dim = 0 if layout in {torch.sparse_csr, torch.sparse_bsr} else 1
nof_compressed_indices = (sparsesize[compressed_dim] // blocksize[compressed_dim] + 1 if blocksize
else sparsesize[compressed_dim] + 1)
compressed_indices = torch.empty((*batchsize, nof_compressed_indices), device='meta', dtype=index_dtype)
plain_indices = torch.empty((*batchsize, nnz), device='meta', dtype=index_dtype)
x = torch.sparse_compressed_tensor(
compressed_indices,
plain_indices,
values,
shape,
layout=layout
)
printed = []
printed.append(f"########## {dtype}/{index_dtype}/size={batchsize}+{sparsesize}+{blocksize}+{densesize} ##########")
printed.append("# sparse meta tensor")
printed.append(str(x))
return printed
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_print_meta(self, dtype, layout):
printed = []
for batchsize, sparsesize, densesize in itertools.product(
[(), (2,)], [(4, 6), (3, 5, 7)], [(), (3,)]
):
if layout is torch.sparse_coo and batchsize:
# COO tensors don't have batch dimensions
continue
if layout is not torch.sparse_coo and len(sparsesize) != 2:
# CSR/CSC/BSR/BSC tensors must have 2 sparse dimensions
continue
printed += self._test_print_meta_data(dtype, layout, batchsize, sparsesize, densesize)
orig_maxDiff = self.maxDiff
self.maxDiff = None
try:
self.assertExpected('\n'.join(printed))
self.maxDiff = orig_maxDiff
except Exception:
self.maxDiff = orig_maxDiff
raise
def assertEqualMeta(self, x, y, expected_nnz):
self.assertEqual(x.layout, y.layout)
self.assertEqual(x.shape, y.shape)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.sparse_dim(), y.sparse_dim())
self.assertEqual(x.dense_dim(), y.dense_dim())
def assertEqualAttrs(x, y, expected_shape):
self.assertEqual(x.shape, expected_shape)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.layout, y.layout)
if not x.is_meta:
self.assertEqual(x.device, y.device)
if x.layout is torch.sparse_coo:
assertEqualAttrs(x._indices(), y._indices(), (*y._indices().shape[:-1], expected_nnz))
assertEqualAttrs(x._values(), y._values(), (expected_nnz, *y._values().shape[1:]))
elif x.layout in {torch.sparse_csr, torch.sparse_bsr}:
assertEqualAttrs(x.crow_indices(), y.crow_indices(), y.crow_indices().shape)
assertEqualAttrs(x.col_indices(), y.col_indices(), (*y.col_indices().shape[:-1], expected_nnz))
batch_dim = x.col_indices().ndim - 1
values_shape = (*y.values().shape[:batch_dim], expected_nnz, *y.values().shape[batch_dim + 1:])
self.assertEqual(x.values().layout, y.values().layout)
self.assertEqual(x.values().dtype, y.values().dtype)
self.assertEqual(x.values().shape, values_shape)
elif x.layout in {torch.sparse_csc, torch.sparse_bsc}:
assertEqualAttrs(x.ccol_indices(), y.ccol_indices(), y.ccol_indices().shape)
assertEqualAttrs(x.row_indices(), y.row_indices(), (*y.row_indices().shape[:-1], expected_nnz))
batch_dim = x.row_indices().ndim - 1
values_shape = (*y.values().shape[:batch_dim], expected_nnz, *y.values().shape[batch_dim + 1:])
self.assertEqual(x.values().layout, y.values().layout)
self.assertEqual(x.values().dtype, y.values().dtype)
self.assertEqual(x.values().shape, values_shape)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_to_meta(self, dtype, layout):
index_dtype = torch.int64
device = 'cpu'
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
m = t.to(device="meta")
self.assertEqual(m.device.type, "meta")
self.assertEqualMeta(m, t, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_zeros_like_meta(self, dtype, layout):
index_dtype = torch.int64
device = 'cpu'
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
m = torch.zeros_like(t, device="meta")
self.assertEqual(m.device.type, "meta")
self.assertEqualMeta(m, t, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_fake(self, dtype, layout):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
fake_mode = FakeTensorMode()
index_dtype = torch.int64
device = 'cpu'
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
f = FakeTensor.from_tensor(t, fake_mode)
self.assertIsInstance(f, FakeTensor)
self.assertEqualMeta(f, t, 0)
d = f.detach()
self.assertIsInstance(d, FakeTensor)
self.assertEqualMeta(d, t, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_zeros_like_fake(self, dtype, layout):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
fake_mode = FakeTensorMode()
index_dtype = torch.int64
device = 'cpu'
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
f = FakeTensor.from_tensor(t, fake_mode)
expected = torch.zeros_like(t)
with no_dispatch():
result = torch.zeros_like(f, device=f.fake_device)
self.assertEqual(result, expected)
self.assertEqualMeta(result, expected, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_sum_meta(self, dtype, layout):
device = 'cpu'
index_dtype = torch.int64
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
m = t.to(device='meta')
r = torch.sum(m)
expected = torch.sum(t).to(device="meta")
self.assertTrue(r.is_meta)
self.assertEqualMeta(r, expected, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_add_meta(self, dtype, layout):
device = 'cpu'
index_dtype = torch.int64
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
expected = torch.add(t, t).to(device='meta')
m = t.to(device='meta')
r = torch.add(m, m)
self.assertEqualMeta(r, expected, 0)
class _SparseDataset(torch.utils.data.Dataset):
# A utility class used in the TestSparseAny.test_dataloader method.
def __init__(self, sparse_tensors):
self.sparse_tensors = sparse_tensors
def __len__(self):
return len(self.sparse_tensors)
def __getitem__(self, index):
return self.sparse_tensors[index]
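# Hypothetical usage sketch (not part of the original tests): the dataset can be
# fed to a DataLoader that yields the stored sparse tensors one by one, e.g.
#   torch.utils.data.DataLoader(_SparseDataset(tensors), batch_size=None)
# batch_size=None disables automatic batching, so no sparse collate is needed.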
class TestSparseAny(TestCase):
@onlyCPU
@all_sparse_layouts('layout', include_strided=False)
@torch.sparse.check_sparse_tensor_invariants(enable=False)
def test_check_sparse_tensor_invariants(self, layout):
if layout is torch.sparse_coo:
def create_invalid_tensor(check_invariants=None):
shape = (2, 2)
invalid_indices = torch.tensor([[0], [3]]) # column index is out of range
values = torch.tensor([1])
if check_invariants is None:
return torch.sparse_coo_tensor(invalid_indices, values, shape)
else:
return torch.sparse_coo_tensor(invalid_indices, values, shape, check_invariants=check_invariants)
expected_exception_message = 'size is inconsistent with indices: for dim 1, size is 2 but found index 3'
elif layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
def create_invalid_tensor(check_invariants=None):
shape = (2, 2)
compressed_indices = torch.tensor([0, 0, 1])
invalid_plain_indices = torch.tensor([3]) # index is out of range
if layout in {torch.sparse_bsr, torch.sparse_bsc}:
values = torch.tensor([[[1]]])
else:
values = torch.tensor([1])
if check_invariants is None:
return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, layout=layout)
else:
return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, layout=layout,
check_invariants=check_invariants)
if layout in {torch.sparse_csr, torch.sparse_bsr}:
expected_exception_message = r'`0 <= col_indices < ncols` is not satisfied.'
else:
expected_exception_message = r'`0 <= row_indices < nrows` is not satisfied.'
else:
raise NotImplementedError(layout)
# First, consider the case where invariant checks are disabled
# "globally" (read: within the context of this test method
# caller) as defined by check_sparse_tensor_invariants(False)
# decorator:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Enable the invariant checks in a local context:
with torch.sparse.check_sparse_tensor_invariants():
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Leaving the local context must restore the "global" state of
# the invariant check feature:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Since invariant checks are disabled by default, we can
# create an invalid sparse tensor without raising an
# exception:
r = create_invalid_tensor()
self.assertEqual(r.layout, layout)
# Or, when disabling the invariants check explicitly:
r = create_invalid_tensor(check_invariants=False)
self.assertEqual(r.layout, layout)
# Enabling invariant check via constructor's optional argument
# will raise an exception when sparse tensor invariants are
# violated:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor(check_invariants=True)
# Check that the global invariant check flag has been restored
# after raising the exception above:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Next, consider the case where invariant checks are enabled
# within a local context:
with torch.sparse.check_sparse_tensor_invariants():
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Since invariant checks are now enabled by default, an
# attempt to create an invalid sparse tensor will lead to
# an exception:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor()
# Similarly, when enabling the invariant checks
# explicitly, invalid sparse tensor construction will lead
# to an exception:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor(check_invariants=True)
# However, invariants check can be disabled via
# constructor's optional argument so that the invalid
# tensor is successfully constructed:
r = create_invalid_tensor(check_invariants=False)
self.assertEqual(r.layout, layout)
# Check that the invariant check flag has been restored
# when leaving the constructor:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Double-check restoring the global state when leaving the
# local context:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Test nesting of pre-defined context managers
check_ctx = torch.sparse.check_sparse_tensor_invariants(True)
no_check_ctx = torch.sparse.check_sparse_tensor_invariants(False)
with check_ctx:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
with no_check_ctx:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Test an attempt to re-use an active context manager instance
check_ctx2 = torch.sparse.check_sparse_tensor_invariants(True)
with check_ctx:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
with no_check_ctx:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
with self.assertRaisesRegex(RuntimeError, "This context manager instance is already activated."
" Use a different context manager instance for context nesting"):
with check_ctx:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
with check_ctx2:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
def test_generate_simple_inputs(self):
layouts = [torch.strided, torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc]
tested_combinations = set()
for tensors in zip(*map(self.generate_simple_inputs, layouts)):
for i, t in enumerate(tensors):
self.assertEqual(t.layout, layouts[i])
# all layouts must produce semantically the same tensors
self.assertEqual(t, tensors[0])
if t.layout is torch.strided:
is_hybrid = None
else:
is_hybrid = t.dense_dim() > 0
if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
is_batch = t.crow_indices().ndim > 1
elif t.layout in {torch.sparse_csc, torch.sparse_bsc}:
is_batch = t.ccol_indices().ndim > 1
else:
is_batch = None
if t.layout in {torch.sparse_bsr, torch.sparse_bsc}:
blocksize = t.values().shape[1:3]
nontrivial_blocksize = 1 not in blocksize
else:
nontrivial_blocksize = None
if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
contiguous_indices = t.crow_indices().is_contiguous() and t.col_indices().is_contiguous()
contiguous_values = t.values().is_contiguous()
elif t.layout in {torch.sparse_csc, torch.sparse_bsc}:
contiguous_indices = t.ccol_indices().is_contiguous() and t.row_indices().is_contiguous()
contiguous_values = t.values().is_contiguous()
elif t.layout is torch.sparse_coo:
contiguous_indices = t._indices().is_contiguous()
contiguous_values = t._values().is_contiguous()
else:
contiguous_indices = None
contiguous_values = t.is_contiguous()
tested_combinations.add((t.layout, is_hybrid, is_batch, nontrivial_blocksize,
contiguous_indices, contiguous_values))
# Ensure that the inputs generation covers all layout,
# non-hybrid/hybrid, non-batch/batch, and contiguity
# combinations:
untested_combinations = set()
for layout in layouts:
for is_hybrid in [False, True]:
if layout is torch.strided:
is_hybrid = None
for is_batch in [False, True]:
if layout in {torch.sparse_coo, torch.strided}:
is_batch = None
for nontrivial_blocksize in [False, True]:
if layout not in {torch.sparse_bsr, torch.sparse_bsc}:
nontrivial_blocksize = None
for contiguous_indices in [False, True]:
if layout is torch.strided:
contiguous_indices = None
elif not is_batch:
# indices are contiguous per-patch
contiguous_indices = True
for contiguous_values in [False, True]:
key = (layout, is_hybrid, is_batch, nontrivial_blocksize,
contiguous_indices, contiguous_values)
if key not in tested_combinations:
untested_combinations.add(
f'layout={layout}, is_hybrid={is_hybrid}, is_batch={is_batch},'
f' nontrivial_blocksize={nontrivial_blocksize},'
f' contiguous_indices={contiguous_indices}, contiguous_values={contiguous_values}')
assert not untested_combinations, untested_combinations
@all_sparse_layouts('layout', include_strided=False)
def test_constructor_autograd(self, device, layout):
def specific_constructor(*args, **kwargs):
if layout is torch.sparse_csr:
return torch.sparse_csr_tensor(*args, **kwargs)
elif layout is torch.sparse_csc:
return torch.sparse_csc_tensor(*args, **kwargs)
elif layout is torch.sparse_bsc:
return torch.sparse_bsc_tensor(*args, **kwargs)
elif layout is torch.sparse_bsr:
return torch.sparse_bsr_tensor(*args, **kwargs)
elif layout is torch.sparse_coo:
return torch.sparse_coo_tensor(*args, **kwargs)
else:
raise NotImplementedError(layout)
def generic_constructor(*args, **kwargs):
if layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
kwargs.update(layout=layout)
return torch.sparse_compressed_tensor(*args, **kwargs)
elif layout is torch.sparse_coo:
return torch.sparse_coo_tensor(*args, **kwargs)
else:
raise NotImplementedError(layout)
if layout is torch.sparse_coo:
constructors = (specific_constructor,)
else:
constructors = (specific_constructor, generic_constructor)
for args, kwargs in self.generate_simple_inputs(
layout, device=device, dtype=torch.float64,
enable_batch=False, # TODO: remove after gh-104868 is resolved
output_tensor=False):
values_offset = 1 if layout is torch.sparse_coo else 2
for cnstr in constructors:
for requires_grad in (False, True):
values = args[values_offset].detach().requires_grad_(requires_grad)
args = (*args[:values_offset], values, *args[values_offset + 1:])
kwargs_ = dict(kwargs)
args_ = args + (kwargs_.pop('size'),)
sparse = cnstr(*args, **kwargs)
self.assertEqual(sparse.requires_grad, requires_grad)
if requires_grad:
for masked in (False, True):
if layout is torch.sparse_coo:
torch.autograd.gradcheck(
lambda i, v: cnstr(i, v, **kwargs).to_dense(masked_grad=masked),
args, masked=masked)
torch.autograd.gradcheck(
lambda i, v, sz: cnstr(i, v, sz, **kwargs_).to_dense(masked_grad=masked),
args_, masked=masked)
else:
if layout in {torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} and 0:
# TODO: remove this if-block after gh-107370 is resolved
continue
torch.autograd.gradcheck(
lambda ci, pi, v: cnstr(ci, pi, v, **kwargs).to_dense(masked_grad=masked),
args, masked=masked)
torch.autograd.gradcheck(
lambda ci, pi, v, sz: cnstr(ci, pi, v, sz, **kwargs_).to_dense(masked_grad=masked),
args_, masked=masked)
@all_sparse_layouts('from_layout', include_strided=False)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@parametrize("index_dtype", [torch.int32, torch.int64])
def test_to_dense(self, from_layout, device, dtype, index_dtype):
"""
This test tests conversion from any layout to strided layout.
"""
for t in self.generate_simple_inputs(
from_layout, device=device, dtype=dtype, index_dtype=index_dtype):
r = t.to_dense()
self.assertEqual(r.layout, torch.strided)
self.assertEqual(r, t)
@all_sparse_layouts('from_layout', include_strided=False)
@dtypes(torch.float64, torch.complex128)
@parametrize("index_dtype", [torch.int64])
@gradcheck_semantics()
def test_gradcheck_to_dense(self, from_layout, device, dtype, index_dtype, gradcheck):
for t in self.generate_simple_inputs(
from_layout, device=device, dtype=dtype, index_dtype=index_dtype):
batch_dim = t.dim() - t.dense_dim() - t.sparse_dim()
if batch_dim > 0:
# TODO: implement batch support in _convert_indices_from_csr_to_coo
continue
t = t.clone().detach().requires_grad_(True)
r = gradcheck(lambda x: torch.Tensor.to_dense(x, masked_grad=gradcheck.masked), t)
self.assertTrue(r)
@all_sparse_layouts('from_layout', include_strided=True)
@all_sparse_layouts('to_layout', include_strided=False)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@parametrize("index_dtype", [torch.int32, torch.int64])
def test_to_sparse(self, from_layout, to_layout, device, dtype, index_dtype):
"""
This test tests conversion from any layout to any sparse layout.
"""
for t in self.generate_simple_inputs(
from_layout, device=device, dtype=dtype, index_dtype=index_dtype,
enable_hybrid=(
# TODO: to support conversion strided->hybrid
# CSR/CSC/BSR/BSC, to_sparse() requires extra keyword
# argument, either nof_batch_dims or
# nof_dense_dims
not (from_layout is torch.strided and to_layout in
{torch.sparse_bsr, torch.sparse_bsc, torch.sparse_csr, torch.sparse_csc}))):
if to_layout in {torch.sparse_bsr, torch.sparse_bsc}:
if from_layout == torch.sparse_bsr:
batch_ndim = t.crow_indices().dim() - 1
blocksize = t.values().shape[batch_ndim + 1:batch_ndim + 3]
elif from_layout == torch.sparse_bsc:
batch_ndim = t.ccol_indices().dim() - 1
blocksize = t.values().shape[batch_ndim + 1:batch_ndim + 3]
else:
blocksize = (1, 1)
else:
blocksize = None
if from_layout is torch.strided:
is_batch = None
is_hybrid = None
else:
is_batch = t.dim() > (t.sparse_dim() + t.dense_dim())
is_hybrid = t.dense_dim() > 0
def explicit_to_sparse(x):
# Used to check that the explicit conversion methods
# are consistent with the `to_sparse(*, layout,
# blocksize)` method.
if to_layout is torch.sparse_coo:
return x.to_sparse_coo()
elif to_layout is torch.sparse_csr:
return x.to_sparse_csr()
elif to_layout is torch.sparse_csc:
return x.to_sparse_csc()
elif to_layout is torch.sparse_bsr:
return x.to_sparse_bsr(blocksize)
elif to_layout is torch.sparse_bsc:
return x.to_sparse_bsc(blocksize)
else:
assert 0 # unreachable
# TODO: The following exception cases all correspond to
# not implemented conversions
if from_layout in {
torch.sparse_csr, torch.sparse_csc} and to_layout in {torch.sparse_bsr, torch.sparse_bsc} and is_batch:
with self.assertRaisesRegex(
RuntimeError,
r"conversion from Sparse(Csr|Csc) to Sparse(Bsr|Bsc) for batched inputs is not supported"):
t.to_sparse(layout=to_layout, blocksize=blocksize)
with self.assertRaisesRegex(
RuntimeError,
r"conversion from Sparse(Csr|Csc) to Sparse(Bsr|Bsc) for batched inputs is not supported"):
explicit_to_sparse(t)
continue
elif from_layout is torch.sparse_coo and to_layout in {
torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} and t.sparse_dim() != 2:
with self.assertRaisesRegex(
RuntimeError,
r"conversion from Sparse to .* for input tensors with sparse_dim\(\)!=2 is not supported"):
t.to_sparse(layout=to_layout, blocksize=blocksize)
with self.assertRaisesRegex(
RuntimeError,
r"conversion from Sparse to .* for input tensors with sparse_dim\(\)!=2 is not supported"):
explicit_to_sparse(t)
continue
elif (from_layout, to_layout) in {(torch.sparse_bsc, torch.sparse_csr), (torch.sparse_bsc, torch.sparse_csc),
(torch.sparse_bsr, torch.sparse_csr), (torch.sparse_bsr, torch.sparse_csc)}:
with self.assertRaisesRegex(
RuntimeError,
r"sparse_compressed_to_sparse_(csr|csc|bsr|bsc): expected\s*(Sparse(Csc|Csr)[,]|)\s*Sparse(Csr|Bsr)"
" or Sparse(Csc|Bsc) layout but got Sparse(Csr|Csc|Bsr|Bsc)"):
t.to_sparse(layout=to_layout, blocksize=blocksize)
with self.assertRaisesRegex(
RuntimeError,
r"sparse_compressed_to_sparse_(csr|csc|bsr|bsc): expected\s*(Sparse(Csc|Csr)[,]|)\s*Sparse(Csr|Bsr)"
" or Sparse(Csc|Bsc) layout but got Sparse(Csr|Csc|Bsr|Bsc)"):
explicit_to_sparse(t)
self.skipTest('NOT IMPL')
else:
r = t.to_sparse(layout=to_layout, blocksize=blocksize)
self.assertEqual(r.layout, to_layout)
# to_sparse method uses unsafe construction of sparse
# tensors. Here we explicitly validate the results to
# make sure that the sparse tensors are consistent
# with the corresponding sparse tensor invariants.
if r.layout in {torch.sparse_csr, torch.sparse_bsr, torch.sparse_csc, torch.sparse_bsc}:
if r.layout in {torch.sparse_csr, torch.sparse_bsr}:
compressed_indices, plain_indices = r.crow_indices(), r.col_indices()
else:
compressed_indices, plain_indices = r.ccol_indices(), r.row_indices()
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, r.values(),
r.shape, r.layout)
if from_layout in {torch.strided, torch.sparse_coo}:
self.assertEqual(compressed_indices.dtype, torch.int64)
self.assertEqual(plain_indices.dtype, torch.int64)
else:
self.assertEqual(compressed_indices.dtype, index_dtype)
self.assertEqual(plain_indices.dtype, index_dtype)
self.assertEqual(r.values().dtype, dtype)
elif r.layout is torch.sparse_coo:
if t.layout is torch.sparse_coo:
self.assertEqual(t.is_coalesced(), r.is_coalesced())
# Check r is truly coalesced when r.is_coalesced == True
if r.is_coalesced():
self.assertTrue(is_coalesced_indices(r))
torch._validate_sparse_coo_tensor_args(r._indices(), r._values(), r.shape)
self.assertEqual(r._indices().dtype, torch.int64)
self.assertEqual(r._values().dtype, dtype)
else:
assert 0 # unreachable
# Finally, we'll test tensor equality:
self.assertEqual(r, t)
# Also, check consistency with explicit conversion methods:
r2 = explicit_to_sparse(t)
self.assertEqual(r2, r)
# Check inverse conversion from sparse compressed block tensors
if from_layout == torch.sparse_bsr:
batch_ndim = t.crow_indices().dim() - 1
from_blocksize = t.values().shape[batch_ndim + 1:batch_ndim + 3]
elif from_layout == torch.sparse_bsc:
batch_ndim = t.ccol_indices().dim() - 1
from_blocksize = t.values().shape[batch_ndim + 1:batch_ndim + 3]
else:
continue
if r.ndim != 2:
continue
t2 = r.to_sparse(layout=from_layout, blocksize=from_blocksize)
self.assertEqual(t2, t)
# extra tests
if (from_layout, to_layout) == (torch.sparse_csr, torch.sparse_bsr):
# See gh-90910
t = torch.tensor([[0, 0, 1, 0], [0, 1, 0, 0]], dtype=dtype, device=device).to_sparse_csr()
r = t.to_sparse_bsr((2, 2))
torch._validate_sparse_compressed_tensor_args(r.crow_indices(), r.col_indices(), r.values(), r.shape, r.layout)
self.assertEqual(r, t)
if (from_layout, to_layout) in {(torch.sparse_csr, torch.sparse_csc),
(torch.sparse_csc, torch.sparse_csr)}:
# See gh-91007
compressed_indices = torch.tensor([0, 4, 8, 8, 12, 16, 20], dtype=index_dtype, device=device)
plain_indices = torch.tensor([0, 1, 2, 3] * 5, dtype=index_dtype, device=device)
t = torch.sparse_compressed_tensor(compressed_indices, plain_indices, range(20),
dtype=dtype, device=device, layout=from_layout)
r = t.to_sparse(layout=to_layout)
if r.layout in {torch.sparse_csr, torch.sparse_bsr}:
compressed_indices, plain_indices = r.crow_indices(), r.col_indices()
else:
compressed_indices, plain_indices = r.ccol_indices(), r.row_indices()
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, r.values(), r.shape, r.layout)
self.assertEqual(r, t)
@onlyNativeDeviceTypes
@suppress_warnings
@ops(reduction_ops_with_sparse_support)
@precisionOverride({torch.bfloat16: 5e-4, torch.float16: 5e-3})
@all_sparse_layouts('layout', include_strided=False)
def test_reductions(self, layout, device, dtype, op):
count = 0
for sample in op.sample_inputs_sparse(layout, device, dtype):
count += 1
t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
result = op.op(t_inp, *t_args, **t_kwargs)
# Checking invariant rop(inp, ...).to_dense() == rop(inp.to_dense(), ...)
dense = op.op(t_inp.to_dense(), *t_args, **t_kwargs)
self.assertEqual(result, dense)
if count == 0:
# we count samples to avoid false-positive test reports
self.skipTest('no sample inputs')
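    # Illustrative sketch (not part of the original test file) of the invariant
    # exercised above: a full reduction over a sparse tensor agrees with the same
    # reduction over its dense counterpart; only public torch APIs are assumed.
    import torch
    _d = torch.tensor([[0., 2.], [3., 0.]])
    _s = _d.to_sparse()
    assert torch.sum(_s) == torch.sum(_d)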
@onlyNativeDeviceTypes
@suppress_warnings
@ops(reduction_ops_with_sparse_support, allowed_dtypes=(torch.float32, torch.float64, torch.complex64, torch.complex128))
@all_sparse_layouts('layout', include_strided=False)
def test_reductions_backward(self, layout, device, dtype, op):
count = 0
for sample in op.sample_inputs_sparse(layout, device, dtype, requires_grad=True):
t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
r = op.op(t_inp, *t_args, **t_kwargs)
if r.numel() != 0:
r = r.sum()
if op.name == 'sum':
count += 1
r.abs().backward()
self.assertEqual(t_inp.grad, torch.ones(t_inp.shape, dtype=dtype, device=device) * torch.sgn(r))
else:
self.skipTest('NOT IMPL')
if count == 0:
# we count samples to avoid false-positive test reports
self.skipTest('no sample inputs')
@onlyNativeDeviceTypes
@suppress_warnings
@parametrize("mth", [subtest(mth, name=mth.__name__)
for mth in [torch.Tensor.is_coalesced,
torch.Tensor.coalesce,
torch.Tensor.indices,
torch.Tensor.values,
torch.Tensor.crow_indices,
torch.Tensor.col_indices,
torch.Tensor.ccol_indices,
torch.Tensor.row_indices,
]])
@all_sparse_layouts('layout', include_strided=True)
def test_unsupported_backend_error_message(self, mth, layout, device):
inp = torch.tensor([[1, 2], [3, 4]], device=device).to_sparse(
layout=layout,
blocksize=(1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None)
assert inp.layout is layout
expected_behaviour = dict(
# <mth name> = (<supported layouts>, <exception message on other layouts>)
is_coalesced=({torch.sparse_coo},
"is_coalesced expected sparse coordinate tensor layout but got (Sparse(Csr|Csc|Bsr|Bsc)|Strided)"),
coalesce=({torch.sparse_coo},
"coalesce expected sparse coordinate tensor layout but got (Sparse(Csr|Csc|Bsr|Bsc)|Strided)"),
indices=({torch.sparse_coo},
"indices expected sparse coordinate tensor layout but got (Sparse(Csr|Csc|Bsr|Bsc)|Strided)"),
values=({torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc},
"values expected sparse tensor layout but got Strided"),
crow_indices=({torch.sparse_csr, torch.sparse_bsr},
"crow_indices expected sparse row compressed tensor layout but got (Sparse(Csc|Bsc|)|Strided)"),
col_indices=({torch.sparse_csr, torch.sparse_bsr},
"col_indices expected sparse row compressed tensor layout but got (Sparse(Csc|Bsc|)|Strided)"),
ccol_indices=({torch.sparse_csc, torch.sparse_bsc},
"ccol_indices expected sparse column compressed tensor layout but got (Sparse(Csr|Bsr|)|Strided)"),
row_indices=({torch.sparse_csc, torch.sparse_bsc},
"row_indices expected sparse column compressed tensor layout but got (Sparse(Csr|Bsr|)|Strided)"),
)[mth.__name__]
if layout in expected_behaviour[0]:
mth(inp)
else:
with self.assertRaisesRegex(RuntimeError, expected_behaviour[1]):
mth(inp)
@onlyNativeDeviceTypes
    @all_sparse_layouts('layout', include_strided=False)
@dtypes(torch.float64, torch.cdouble)
@parametrize("masked", [subtest(False, name='sparse'), subtest(True, name='masked')])
@parametrize("fast_mode", [subtest(False, name='slow'), subtest(True, name='fast')])
def test_gradcheck_mm(self, layout, dtype, device, masked, fast_mode):
# This function does not check the following cases:
# - batch or hybrid tensors because addmm does not support
# such inputs yet
# - check_forward_ad=True because of the lack of sparse tensor
# support in aten::view_as_real, torch._VF._make_dual, etc.
ref_x = torch.tensor([[1, 2, 0, 0],
[0, 6, 0, 0],
[0, 0, 0, 0],
[13, 14, 0, 15]], dtype=dtype, device=device)
ref_y = torch.tensor([[11, 12, 13, 14],
[21, 22, 23, 24],
[31, 32, 33, 34],
[41, 42, 43, 44]],
dtype=dtype, device=device)
mm = torch.sparse.mm if masked else torch.mm
blocksize = (2, 2) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None
x = ref_x.to_sparse(layout=layout, blocksize=blocksize).requires_grad_(True)
y = ref_y.requires_grad_(True)
if layout is torch.sparse_bsr and not masked or layout is torch.sparse_bsc:
with self.assertRaisesRegex(
RuntimeError,
r"addmm: computation on (CPU|CUDA) is not implemented for Strided \+ Sparse(Bsr|Bsc) @ Strided"):
torch.autograd.gradcheck(mm, (x, y), fast_mode=fast_mode, masked=masked)
self.skipTest('NOT IMPL')
elif layout in {torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} and masked:
with self.assertRaisesRegex(
RuntimeError,
r"(sparse_addmm_sparse_backward: unsupported combination of layouts,"
r" grad: Strided, mat1: Sparse(Csc|Bsr|Bsc), mat2: Strided"
r"|addmm: computation on (CPU|CUDA) is not implemented for "
r"Strided \+ Sparse(Csc|Bsr|Bsc) @ Strided without MKL)"):
torch.autograd.gradcheck(mm, (x, y), fast_mode=fast_mode, masked=masked)
self.skipTest('NOT IMPL')
else:
torch.autograd.gradcheck(mm, (x, y), fast_mode=fast_mode, masked=masked)
@onlyNativeDeviceTypes
@suppress_warnings
@ops(binary_ufuncs_with_sparse_support)
@all_sparse_layouts('layout', include_strided=False)
def test_binary_operation(self, layout, device, dtype, op):
if not op.supports_sparse_layout(layout):
self.skipTest(f'{layout} is not supported in `{op.name}` OpInfo definition. Skipping!')
for sample in op.sample_inputs_sparse(layout, device, dtype):
if validate_sample_input_sparse(op, sample, check_validate=False) is not sample:
# that is, the validation returns the sparse sample
                # wrapped within an ErrorInput instance
continue
t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim()
result = op.op(t_inp, *t_args, **t_kwargs)
# Check rop(inp, ...).shape == inp.shape
self.assertEqual(result.shape, t_inp.shape)
# Check rop(inp, ...).sparse_dim() == inp.sparse_dim()
self.assertEqual(result.sparse_dim(), t_inp.sparse_dim())
# Check rop(inp, ...).dense_dim() == inp.dense_dim()
self.assertEqual(result.dense_dim(), t_inp.dense_dim())
# Check invariant rop(inp, ...).to_dense() == rop(inp.to_dense(), ...)
try:
dense = op.op(t_inp.to_dense(), *(t_args[0].to_dense(), *t_args[1:]), **t_kwargs)
except Exception as msg:
                # this is a strided op issue, so we skip the sample silently here
if "\"cpublas_axpy_impl\" not implemented for 'ComplexHalf'" in str(msg):
continue
raise
self.assertEqual(result, dense)
@onlyCPU
@all_sparse_layouts('layout', include_strided=True)
@dtypes(torch.double)
def test_to_sparse_identity(self, device, layout, dtype):
for dense_dim in range(4):
x_dense = torch.eye(dense_dim, dtype=dtype, device=device)
for sparse_dim_in in range(1, dense_dim):
x_sparse = x_dense.to_sparse(sparse_dim_in)
for sparse_dim_out in range(0, dense_dim):
if sparse_dim_out == sparse_dim_in:
self.assertTrue(x_sparse.to_sparse(sparse_dim_out).sparse_dim() == sparse_dim_out)
else:
with self.assertRaisesRegex(
RuntimeError,
r"to_sparse: conversion from Sparse to Sparse with sparse_dim argument !=self.sparse_dim\(\)"
" is not supported"):
x_sparse.to_sparse(sparse_dim_out)
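    # Illustrative sketch (not part of the original test file): Tensor.to_sparse(n)
    # makes the first n dimensions sparse and leaves the remaining ones dense.
    import torch
    _x = torch.eye(3).to_sparse(1)
    assert _x.sparse_dim() == 1 and _x.dense_dim() == 1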
@onlyNativeDeviceTypes
@suppress_warnings
@ops(like_fns_with_sparse_support)
@all_sparse_layouts('layout', include_strided=False)
def test_like_fns(self, layout, device, dtype, op):
for sample in op.sample_inputs_sparse(layout, device, dtype):
t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim()
if t_inp.layout in {torch.sparse_bsr, torch.sparse_bsc}:
expected_blocksize = t_inp.values().shape[batch_dim + 1:batch_dim + 3]
else:
expected_blocksize = None
expected_dtype = t_kwargs.get('dtype', dtype)
expected_device = torch.device(t_kwargs.get('device', device))
expected_layout = t_kwargs.get('layout', layout)
result = op.op(t_inp, *t_args, **t_kwargs)
self.assertEqual(result.dtype, expected_dtype)
self.assertEqual(result.device.type, expected_device.type)
self.assertEqual(result.layout, expected_layout)
if result.layout in {torch.sparse_bsr, torch.sparse_bsc}:
result_batch_dim = result.dim() - result.dense_dim() - result.sparse_dim()
blocksize = result.values().shape[result_batch_dim + 1:result_batch_dim + 3]
self.assertEqual(blocksize, expected_blocksize)
# Check op(inp).shape == inp.shape
self.assertEqual(result.shape, t_inp.shape)
if expected_layout is torch.strided:
self.assertEqual(result.sparse_dim(), 0)
# Check op(inp, layout=torch.strided).dense_dim() == inp.dim()
self.assertEqual(result.dense_dim(), t_inp.dim())
elif expected_layout is torch.sparse_coo:
# Check op(inp, layout=torch.sparse_coo).sparse_dim() == batch_dim + inp.sparse_dim()
self.assertEqual(result.sparse_dim(), batch_dim + t_inp.sparse_dim())
# Check op(inp, layout=torch.sparse_coo).dense_dim() == inp.dense_dim()
self.assertEqual(result.dense_dim(), t_inp.dense_dim())
torch._validate_sparse_coo_tensor_args(result._indices(), result._values(), result.shape)
else:
# Check op(inp).sparse_dim() == inp.sparse_dim()
self.assertEqual(result.sparse_dim(), t_inp.sparse_dim())
# Check op(inp).dense_dim() == inp.dense_dim()
self.assertEqual(result.dense_dim(), t_inp.dense_dim())
if result.layout in {torch.sparse_csr, torch.sparse_bsr}:
compressed_indices, plain_indices = result.crow_indices(), result.col_indices()
else:
compressed_indices, plain_indices = result.ccol_indices(), result.row_indices()
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, result.values(),
result.shape, result.layout)
@all_sparse_layouts('mask_layout', include_strided=False)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_mask(self, mask_layout, device, dtype):
input_layout = torch.strided
mask_dtype = torch.bool
for mask in self.generate_simple_inputs(mask_layout, dtype=mask_dtype, device=device,
enable_hybrid=False, enable_batch=False):
x = make_tensor(mask.shape, dtype=dtype, device=device).to_sparse(layout=input_layout)
result = x.sparse_mask(mask)
# Check invariant `x.sparse_mask(mask).<indices> == mask.<indices>`
if mask_layout is torch.sparse_coo:
self.assertEqual(result._indices(), mask._indices())
ones = torch.sparse_coo_tensor(mask._indices(),
torch.ones_like(mask._values(), dtype=x.dtype),
mask.shape,
is_coalesced=mask.is_coalesced())
elif mask_layout in {torch.sparse_csr, torch.sparse_bsr}:
self.assertEqual(result.crow_indices(), mask.crow_indices())
self.assertEqual(result.col_indices(), mask.col_indices())
ones = torch.sparse_compressed_tensor(mask.crow_indices(), mask.col_indices(),
torch.ones_like(mask.values(), dtype=x.dtype),
mask.shape, layout=mask.layout)
else:
self.assertEqual(result.ccol_indices(), mask.ccol_indices())
self.assertEqual(result.row_indices(), mask.row_indices())
ones = torch.sparse_compressed_tensor(mask.ccol_indices(), mask.row_indices(),
torch.ones_like(mask.values(), dtype=x.dtype),
mask.shape, layout=mask.layout)
# Check invariant:
# x.sparse_mask(mask).to_dense() == x.mul(sparse_xyz_tensor(<mask indices>,
# ones_like(<mask values>)).to_dense())
expected = x.mul(ones.to_dense())
self.assertEqual(result.to_dense(), expected)
# Check invariant `mask.to_dense().sparse_mask(mask) == mask`
result = mask.to_dense().sparse_mask(mask)
self.assertEqual(result, mask)
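    # Illustrative sketch (not part of the original test file): sparse_mask keeps
    # the entries of a strided tensor only at the positions specified by the mask.
    import torch
    _x = torch.arange(1., 5.).reshape(2, 2)
    _mask = torch.tensor([[1., 0.], [0., 1.]]).to_sparse()
    assert torch.equal(_x.sparse_mask(_mask).to_dense(), torch.tensor([[1., 0.], [0., 4.]]))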
@all_sparse_layouts('layout', include_strided=False)
@parametrize("masked", [subtest(False, name='nonmasked'), subtest(True, name='masked')])
@parametrize("fast_mode", [subtest(False, name='slow'), subtest(True, name='fast')])
def test_as_sparse_gradcheck(self, layout, device, masked, fast_mode):
gradcheck = torch.sparse.as_sparse_gradcheck(torch.autograd.gradcheck)
sparse_compressed_layouts = {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}
def identity(x):
return x
for func in (torch.Tensor.to_dense,
torch.Tensor.sum,
identity,
torch.Tensor.to_sparse,
torch.Tensor.values,
):
for x in self.generate_simple_inputs(
layout,
device=device,
dtype=torch.float64,
# TODO: fix gh-104868 to enable batched samples:
enable_batch=layout not in sparse_compressed_layouts,
enable_hybrid=not (
layout in sparse_compressed_layouts and (
# FIXME: RuntimeError: sparse_mask(): the
# number of sparse dimensions in `self`
# should match that of the `mask`. Got
# `self.sparse_dim() == 3` !=
# `mask.sparse_dim() == 2
func.__name__ == 'sum'
# FIXME: RuntimeError: expected
# col_indices to be a contiguous tensor
# per batch
or func.__name__ == 'to_sparse'
))):
if layout is torch.sparse_coo and func.__name__ == 'values':
x = x.coalesce()
gradcheck(func, x.requires_grad_(True), masked=masked, fast_mode=fast_mode)
@onlyCPU
@all_sparse_layouts('layout', include_strided=False)
@dtypes(torch.double)
def test_dataloader(self, device, layout, dtype):
data = list(self.generate_simple_inputs(layout, device=device, dtype=dtype))
dataset = _SparseDataset(data)
loader = torch.utils.data.DataLoader(dataset, batch_size=None, num_workers=2)
loaded_data = list(loader)
self.assertEqual(data, loaded_data)
@onlyCPU
def test_invalid_blocksize(self):
# Blocksize should be a tuple/list/torch.Size containing two values
with self.assertRaisesRegex(RuntimeError, ".*blocksize.*, but got 1"):
torch.randn(1).to_sparse(blocksize=(1,))
with self.assertRaisesRegex(RuntimeError, ".*blocksize.*, but got 1"):
torch.randn(1).to_sparse(blocksize=[1])
with self.assertRaisesRegex(RuntimeError, ".*blocksize.*, but got 1"):
torch.randn(1).to_sparse(blocksize=torch.Size((1,)))
with self.assertRaisesRegex(RuntimeError, ".*blocksize.*, but got 3"):
torch.randn(1).to_sparse(blocksize=(1, 1, 1))
with self.assertRaisesRegex(RuntimeError, ".*blocksize.*, but got 3"):
torch.randn(1).to_sparse(blocksize=[1, 1, 1])
with self.assertRaisesRegex(RuntimeError, ".*blocksize.*, but got 3"):
torch.randn(1).to_sparse(blocksize=torch.Size((1, 1, 1)))
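    # Illustrative sketch (not part of the original test file): a valid blocksize is
    # a two-element sequence whose entries divide the corresponding matrix dimensions.
    import torch
    _bsr = torch.eye(4).to_sparse(layout=torch.sparse_bsr, blocksize=(2, 2))
    assert _bsr.values().shape[-2:] == (2, 2)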
@unittest.skipIf(not torch.cuda.is_available(), 'requires cuda')
@onlyCPU
@all_sparse_layouts('layout', include_strided=True)
def test_constructor_pin_memory(self, device, layout):
"""Tests sparse_xyz_tensor(indices, values, pin_memory=True)
"""
self.assertEqual(device, "cpu")
for t in self.generate_simple_inputs(
layout, device=device, dtype=torch.float64,
enable_zero_sized=False, # pinning zero-sized tensors is a no-op
pin_memory=True,
enable_batch=False, # TODO: remove after gh-104868 is resolved
):
if layout is torch.sparse_coo:
self.assertTrue(t._indices().is_pinned())
self.assertTrue(t._values().is_pinned())
elif layout in {torch.sparse_csr, torch.sparse_bsr}:
self.assertTrue(t.crow_indices().is_pinned())
self.assertTrue(t.col_indices().is_pinned())
self.assertTrue(t.values().is_pinned())
elif layout in {torch.sparse_csc, torch.sparse_bsc}:
self.assertTrue(t.ccol_indices().is_pinned())
self.assertTrue(t.row_indices().is_pinned())
self.assertTrue(t.values().is_pinned())
elif layout is torch.strided:
pass
else:
assert 0 # unreachable
self.assertTrue(t.is_pinned())
@unittest.skipIf(not torch.cuda.is_available(), 'requires cuda')
@onlyCPU
@all_sparse_layouts('layout', include_strided=True)
def test_method_pin_memory(self, device, layout):
"""Tests sparse_xyz_tensor(indices, values, pin_memory=False).pin_memory()
"""
for t_ in self.generate_simple_inputs(
layout, device=device, dtype=torch.float64,
enable_zero_sized=False, # pinning zero-sized tensors is a no-op
pin_memory=False, # no pinning
enable_batch=False, # TODO: remove after gh-104868 is resolved
):
t = t_.pin_memory()
self.assertTrue(t.is_pinned())
# registering a non-pinned tensor with CUDA memory is a
# clone operation
self.assertFalse(t_.is_pinned())
            # registering an already pinned tensor with CUDA memory is an
# identity operation:
t2 = t.pin_memory()
self.assertTrue(t2 is t)
if layout is torch.sparse_coo:
self.assertTrue(t._indices().is_pinned())
self.assertTrue(t._values().is_pinned())
self.assertFalse(t_._indices().is_pinned())
self.assertFalse(t_._values().is_pinned())
elif layout in {torch.sparse_csr, torch.sparse_bsr}:
self.assertTrue(t.crow_indices().is_pinned())
self.assertTrue(t.col_indices().is_pinned())
self.assertTrue(t.values().is_pinned())
self.assertFalse(t_.crow_indices().is_pinned())
self.assertFalse(t_.col_indices().is_pinned())
self.assertFalse(t_.values().is_pinned())
elif layout in {torch.sparse_csc, torch.sparse_bsc}:
self.assertTrue(t.ccol_indices().is_pinned())
self.assertTrue(t.row_indices().is_pinned())
self.assertTrue(t.values().is_pinned())
self.assertFalse(t_.ccol_indices().is_pinned())
self.assertFalse(t_.row_indices().is_pinned())
self.assertFalse(t_.values().is_pinned())
elif layout is torch.strided:
pass
else:
assert 0 # unreachable
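    # Illustrative sketch (not part of the original test file): pinning a sparse COO
    # tensor pins its indices and values and leaves the source tensor unpinned. A
    # CUDA-capable build is assumed, since pinning registers CPU memory with CUDA.
    import torch
    if torch.cuda.is_available():
        _t = torch.eye(3, dtype=torch.float64).to_sparse()
        _p = _t.pin_memory()
        assert _p.is_pinned() and _p._indices().is_pinned() and _p._values().is_pinned()
        assert not _t.is_pinned()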
@unittest.skipIf(not torch.cuda.is_available(), 'requires cuda')
@onlyCPU
@all_sparse_layouts('layout', include_strided=True)
def test_constructor_pinned_memory(self, device, layout):
"""Tests sparse_xyz_tensor(indices.pin_memory(device), values.pin_memory(device))
"""
pin_memory_device = "cuda"
for t in self.generate_simple_inputs(
layout, device=device, dtype=torch.float64,
enable_zero_sized=False, # pinning zero-sized tensors is a no-op
pin_memory=None, # constructor does not specify pin_memory=...
members_pin_memory=True, # indices and values are pinned
enable_batch=False, # TODO: remove after gh-104868 is resolved
):
if layout is torch.sparse_coo:
self.assertTrue(t._indices().is_pinned())
self.assertTrue(t._values().is_pinned())
elif layout in {torch.sparse_csr, torch.sparse_bsr}:
self.assertTrue(t.crow_indices().is_pinned())
self.assertTrue(t.col_indices().is_pinned())
self.assertTrue(t.values().is_pinned())
elif layout in {torch.sparse_csc, torch.sparse_bsc}:
self.assertTrue(t.ccol_indices().is_pinned())
self.assertTrue(t.row_indices().is_pinned())
self.assertTrue(t.values().is_pinned())
elif layout is torch.strided:
pass
else:
assert 0 # unreachable
self.assertTrue(t.is_pinned())
@unittest.skipIf(not torch.cuda.is_available(), 'requires cuda')
@onlyCPU
@all_sparse_layouts('layout', include_strided=False)
def test_constructor_mismatched_pinned_memory(self, device, layout):
"""Test the failure to construct sparse tensor from indices and values
that have different pinning states.
"""
def generic_constructor(*args, **kwargs):
if layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
kwargs.update(layout=layout)
return torch.sparse_compressed_tensor(*args, **kwargs)
elif layout is torch.sparse_coo:
return torch.sparse_coo_tensor(*args, **kwargs)
else:
raise NotImplementedError(layout)
for args, kwargs in self.generate_simple_inputs(
layout, device=device, dtype=torch.float64,
enable_zero_sized=False, # pinning zero-sized tensors is a no-op
enable_batch=False, # TODO: remove after gh-104868 is resolved
output_tensor=False):
# indices are pinned, values is a non-pinned tensor
args1 = (args[0].pin_memory(), *args[1:])
# indices are non-pinned, values is a pinned tensor
args2 = (*args[:-1], args[-1].pin_memory())
with self.assertRaisesRegex(
RuntimeError, r"memory pinning of \w*indices \(=1\) must match memory pinning of values \(=0\)"):
generic_constructor(*args1, **kwargs)
with self.assertRaisesRegex(
RuntimeError, r"memory pinning of \w*indices \(=0\) must match memory pinning of values \(=1\)"):
generic_constructor(*args2, **kwargs)
# e.g., TestSparseUnaryUfuncsCPU and TestSparseUnaryUfuncsCUDA
instantiate_device_type_tests(TestSparseUnaryUfuncs, globals(), except_for='meta')
instantiate_device_type_tests(TestSparseMaskedReductions, globals(), except_for='meta')
# e.g., TestSparseCPU and TestSparseCUDA
instantiate_device_type_tests(TestSparse, globals(), except_for='meta')
instantiate_device_type_tests(TestSparseAny, globals(), except_for='meta')
instantiate_parametrized_tests(TestSparseMeta)
instantiate_parametrized_tests(TestSparseLegacyAndDeprecation)
if __name__ == '__main__':
run_tests()
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_sparse_fn_grad
|
def test_sparse_fn_grad(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Op doesn't support autograd")
for sample in op.sample_inputs(device, dtype):
sparse_input = sample.input.to_sparse().detach().requires_grad_(True)
def fn(x):
return _sparse_to_dense(
op(x, *sample.args, **sample.kwargs))
self.assertTrue(gradcheck(
fn,
(sparse_input,),
check_batched_grad=False,
check_grad_dtypes=True,
check_sparse_nnz=True,
nondet_tol=op.gradcheck_nondet_tol,
fast_mode=op.gradcheck_fast_mode))
|
def test_sparse_fn_grad(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Op doesn't support autograd")
for sample in op.sample_inputs(device, dtype):
sparse_input = sample.input.to_sparse().detach().requires_grad_(True)
def fn(x):
return _sparse_to_dense(
op(x, *sample.args, **sample.kwargs))
self.assertTrue(gradcheck(
fn,
(sparse_input,),
check_batched_grad=False,
check_grad_dtypes=True,
nondet_tol=op.gradcheck_nondet_tol,
fast_mode=op.gradcheck_fast_mode,
masked=True))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseUnaryUfuncs(TestCase):
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseUnaryUfuncs(TestCase):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
fn
|
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
|
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_future_empty_dim
|
def test_future_empty_dim(self, device, dtype, op):
"""Currently, `dim=()` in reductions operations means "reduce over
all dimensions" while in future, it will read "no reduce". See
https://github.com/pytorch/pytorch/issues/29137
For sparse masked reductions, we'll implement the current behavior.
For testing, we'll use samples with `dim=0` and map it to
`dim=()` until
torch.testing._internal.common_methods_invocations._generate_reduction_kwargs
is made to generate samples with `dim=()` for non-scalar
inputs. With this and after gh-29137 is resolved, this test
can be deleted. See also `torch.masked._canonical_dim`
implementation about changing the `dim=()` behavior.
"""
samples = op.sample_inputs_func(op, device, dtype, requires_grad=False)
op_name = op.name.replace('masked.', '')
for sample_input in samples:
if sample_input.kwargs.get('dim') != 0:
continue
sample_input_kwargs = dict(sample_input.kwargs)
sample_input_kwargs['dim'] = () # reduce over all dimensions
t = sample_input.input
mask = sample_input_kwargs.get('mask')
if mask is None and op_name in {'prod', 'amax', 'amin'}:
# FIXME: for now reductions with non-zero reduction identity and
# unspecified mask are not supported for sparse COO
# tensors, see torch.masked.prod implementation
# for details.
continue
sparse_op_kwargs = dict(sample_input_kwargs)
actual = op(t.to_sparse(), *sample_input.args, **sample_input_kwargs)
self.assertEqual(actual.layout, torch.sparse_coo)
expected = op(t, *sample_input.args, **sample_input_kwargs).to_sparse()
self.assertEqual(actual, expected)
|
def test_future_empty_dim(self, device, dtype, op):
"""Currently, `dim=()` in reductions operations means "reduce over
all dimensions" while in future, it will read "no reduce". See
https://github.com/pytorch/pytorch/issues/29137
For sparse masked reductions, we'll implement the current behavior.
For testing, we'll use samples with `dim=0` and map it to
`dim=()` until
torch.testing._internal.common_methods_invocations._generate_reduction_kwargs
is made to generate samples with `dim=()` for non-scalar
inputs. With this and after gh-29137 is resolved, this test
can be deleted. See also `torch.masked._canonical_dim`
implementation about changing the `dim=()` behavior.
"""
samples = op.sample_inputs_func(op, device, dtype, requires_grad=False)
op_name = op.name.replace('masked.', '')
for sample_input in samples:
if sample_input.kwargs.get('dim') != 0:
continue
sample_input_kwargs = dict(sample_input.kwargs)
sample_input_kwargs['dim'] = () # reduce over all dimensions
t = sample_input.input
mask = sample_input_kwargs.get('mask')
if mask is None and op_name in {'prod', 'amax', 'amin'}:
# FIXME: for now reductions with non-zero reduction identity and
# unspecified mask are not supported for sparse COO
# tensors, see torch.masked.prod implementation
# for details.
continue
sparse_op_kwargs = dict(sample_input_kwargs)
actual = op(t.to_sparse(), *sample_input.args, **sample_input_kwargs)
self.assertEqual(actual.layout, torch.sparse_coo)
expected = op(t, *sample_input.args, **sample_input_kwargs).to_sparse()
atol = None
rtol = None
if op.name in self.fp16_low_precision_list and dtype == torch.half:
atol = 1e-5
rtol = 2e-3
self.assertEqual(actual, expected, atol=atol, rtol=rtol)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseMaskedReductions(TestCase):
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseMaskedReductions(TestCase):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
_test_meta_sparse_coo
|
self.assertTrue(r.is_meta)
self.assertEqual(r.device.type, "meta")
r2 = torch.empty_like(r)
self.assertTrue(r2.is_meta)
self.assertEqual(r, r2)
r3 = torch.sparse_coo_tensor(size=(4, 4), device='meta')
self.assertTrue(r3.is_meta)
self.assertEqual(r, r3)
r.sparse_resize_((4, 4), 1, 1)
r.sparse_resize_and_clear_((4, 4, 4), 2, 1)
self.assertEqual(r.sparse_dim(), 2)
self.assertEqual(r.dense_dim(), 1)
self.assertEqual(r._dimV(), 1)
self.assertEqual(r._nnz(), 0)
# nnz zero sparse tensors should always be coalesced at creation
self.assertEqual(r.is_coalesced(), True)
        # but we can force them into the uncoalesced state
r._coalesced_(False)
self.assertEqual(r.is_coalesced(), False)
        # restore the coalesced state for indices/values access
r._coalesced_(True)
# TODO: this sort of aliasing will need to be handled by
# functionalization
self.assertEqual(r._indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r._values(), torch.empty(0, 4, device='meta'))
self.assertEqual(r.indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r.values(), torch.empty(0, 4, device='meta'))
|
def _test_meta_sparse_coo(self, dtype):
r = torch.empty(4, 4, layout=torch.sparse_coo, device='meta', dtype=dtype)
self.assertTrue(r.is_meta)
self.assertEqual(r.device.type, "meta")
r2 = torch.empty_like(r)
self.assertTrue(r2.is_meta)
self.assertEqual(r, r2)
r3 = torch.sparse_coo_tensor(size=(4, 4), device='meta', dtype=dtype)
self.assertTrue(r3.is_meta)
self.assertEqual(r, r3)
r.sparse_resize_((4, 4), 1, 1)
r.sparse_resize_and_clear_((4, 4, 4), 2, 1)
self.assertEqual(r.sparse_dim(), 2)
self.assertEqual(r.dense_dim(), 1)
self.assertEqual(r._dimV(), 1)
self.assertEqual(r._nnz(), 0)
# nnz zero sparse tensors should always be coalesced at creation
self.assertEqual(r.is_coalesced(), True)
        # but we can force them into the uncoalesced state
r._coalesced_(False)
self.assertEqual(r.is_coalesced(), False)
        # restore the coalesced state for indices/values access
r._coalesced_(True)
# TODO: this sort of aliasing will need to be handled by
# functionalization
self.assertEqual(r._indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r._values(), torch.empty(0, 4, device='meta', dtype=dtype))
self.assertEqual(r.indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r.values(), torch.empty(0, 4, device='meta', dtype=dtype))
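    # Illustrative sketch (not part of the original test file): sparse COO tensors
    # can be allocated on the "meta" device; they carry shape, dtype and layout but
    # hold no data.
    import torch
    _r = torch.empty(4, 4, layout=torch.sparse_coo, device='meta')
    assert _r.is_meta and _r.layout is torch.sparse_coo and _r._nnz() == 0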
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseMeta(TestCase):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_sparse.py
|
sparse_softmax
|
def sparse_softmax(sparse, dim):
"""Pure Python softmax of a sparse tensor. Assuming -inf for
unspecified sparse tensor data. This is a prototype of
    the sparse softmax algorithm in Python.
"""
dtype = sparse.dtype
device = sparse.device
    # softmax is a non-linear operation, so sparse tensors must
# be coalesced.
sparse = sparse.coalesce()
inf = float('inf')
indices = sparse._indices()
values = sparse._values()
if dim < sparse.sparse_dim():
nnz = sparse._nnz()
# compute pool indices
size = sparse.size()
strides = torch.ones((sparse.sparse_dim(), 1), dtype=indices.dtype, device=indices.device)
for i in reversed(range(sparse.sparse_dim() - 1)):
strides[i, 0] = strides[i + 1, 0] * size[i + 1]
strides[dim, 0] = 0
pool = (indices * strides).sum(dim=0)
i2p = {}
for i in range(nnz):
c = int(pool[i])
if c not in i2p:
i2p[c] = len(i2p)
pool[i] = i2p[c]
# compute max
dense_size = tuple(size[sparse.sparse_dim():])
mx = torch.empty((pool.max() + 1,) + dense_size, dtype=dtype, device=device)
mx[:] = -inf
for n in range(nnz):
p = pool[n]
mx[p] = torch.max(mx[p], values[n])
# apply exp to (v - mx) and sum the results
exp_values = torch.empty_like(values)
exp_sums = torch.zeros_like(mx)
for n in range(nnz):
p = pool[n]
v = exp_values[n] = (values[n] - mx[p]).exp()
exp_sums[p] = exp_sums[p] + v
# normalize with the sum of exponents
for n in range(nnz):
p = pool[n]
exp_values[n] = exp_values[n] / exp_sums[p]
return torch.sparse_coo_tensor(indices,
exp_values,
sparse.size(),
dtype=dtype, device=device)
elif dim < sparse.sparse_dim() + sparse.dense_dim():
return torch.sparse_coo_tensor(indices,
F.softmax(values, dim - sparse.sparse_dim() + 1),
sparse.size(),
dtype=dtype, device=device)
else:
raise ValueError(
'`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`'
% (dim, sparse.sparse_dim(), sparse.dense_dim()))
|
def sparse_softmax(sparse, dim):
"""Pure Python softmax of a sparse tensor. Assuming -inf for
unspecified sparse tensor data. This is a prototype of
    the sparse softmax algorithm in Python.
"""
dtype = sparse.dtype
device = sparse.device
    # softmax is a non-linear operation, so sparse tensors must
# be coalesced.
sparse = sparse.coalesce()
inf = float('inf')
indices = sparse._indices()
values = sparse._values()
if dim < sparse.sparse_dim():
nnz = sparse._nnz()
# compute pool indices
size = sparse.size()
strides = torch.ones((sparse.sparse_dim(), 1), dtype=indices.dtype, device=indices.device)
for i in reversed(range(sparse.sparse_dim() - 1)):
strides[i, 0] = strides[i + 1, 0] * size[i + 1]
strides[dim, 0] = 0
pool = (indices * strides).sum(dim=0)
i2p = {}
for i in range(nnz):
c = int(pool[i])
if c not in i2p:
i2p[c] = len(i2p)
pool[i] = i2p[c]
# compute max
dense_size = tuple(size[sparse.sparse_dim():])
mx = torch.empty((pool.max() + 1,) + dense_size, dtype=dtype, device=device)
mx[:] = -inf
for n in range(nnz):
p = pool[n]
mx[p] = torch.max(mx[p], values[n])
# apply exp to (v - mx) and sum the results
exp_values = torch.empty_like(values)
exp_sums = torch.zeros_like(mx)
for n in range(nnz):
p = pool[n]
v = exp_values[n] = (values[n] - mx[p]).exp()
exp_sums[p] = exp_sums[p] + v
# normalize with the sum of exponents
for n in range(nnz):
p = pool[n]
exp_values[n] = exp_values[n] / exp_sums[p]
return torch.sparse_coo_tensor(indices,
exp_values,
sparse.size(),
dtype=dtype, device=device)
elif dim < sparse.sparse_dim() + sparse.dense_dim():
return torch.sparse_coo_tensor(indices,
F.softmax(values, dim - sparse.sparse_dim() + 1),
sparse.size(),
dtype=dtype, device=device)
else:
raise ValueError(
f'`dim(={dim})` must be smaller than `sparse_dim(={sparse.sparse_dim()}) + dense_dim(={sparse.dense_dim()})`')
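# Illustrative sketch (not part of the original test file): the built-in
# torch.sparse.softmax treats unspecified elements as -inf, matching the pure
# Python prototype above; to_dense() then fills unspecified positions with zeros.
import torch
_i = torch.tensor([[0, 1, 1], [0, 0, 1]])
_v = torch.tensor([0., 2., 3.])
_s = torch.sparse_coo_tensor(_i, _v, (2, 2))
_r = torch.sparse.softmax(_s, dim=1).to_dense()
assert torch.allclose(_r, torch.tensor([[1.0, 0.0], [0.2689, 0.7311]]), atol=1e-3)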
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_op
|
def test_op(sparse_dims, nnz, with_size, coalesced):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
def sparse_log(x):
return torch.sparse_coo_tensor(x._indices(), x._values().log(),
x.size(), dtype=x.dtype, device=x.device)
for dim in range(x.sparse_dim() + x.dense_dim()):
# Check sparse softmax definition
# check Python sparse softmax
y = sparse_softmax(x, dim)
r1 = softmax_to_dense(x, dim)
r2 = y.to_dense()
self.assertEqual(r1, r2)
# check C++ sparse softmax
y1 = torch.sparse.softmax(x, dim)
self.assertEqual(y, y1)
# check C++ sparse log_softmax
ly1 = torch.sparse.log_softmax(x, dim)
self.assertEqual(ly1, sparse_log(y1))
# Check autograd support on sparse softmax
# check softmax Jacobian definition for dense input
x1 = to_dense(x, fill_value=float('-inf'))
J = softmax_jacobian_analytic(x1, dim)
assert J.shape[0] == x.shape[dim]
assert J.shape[dim + 1] == x.shape[dim]
# check softmax Jacobian from autograd, dense input
J2 = softmax_jacobian_autograd(x1, dim)
self.assertEqual(J, J2)
# check softmax Jacobian from autograd, sparse input
J3 = softmax_jacobian_autograd(x, dim)
self.assertEqual(J, J3)
'''
y = softmax(x, dim)
z = log(y) = log_softmax(x, dim)
Dy/Dx = J
Dz/Dx = Dz/Dy Dy/Dx = 1/y * J
=> J = J_log * y
'''
# log_softmax Jacobian from autograd, dense input
J2_log = softmax_jacobian_autograd(x1, dim, log=True)
# log_softmax Jacobian from autograd, sparse input
J3_log = softmax_jacobian_autograd(x, dim, log=True)
J = J.transpose(0, dim + 1)
J2_log = J2_log.transpose(0, dim + 1)
J3_log = J3_log.transpose(0, dim + 1)
self.assertEqual(J, J2_log * r1)
self.assertEqual(J, J3_log * r1)
if dim == 0:
# check dtype argument
other_dtype = torch.float32
y2 = torch.sparse.softmax(x, dim, dtype=other_dtype)
self.assertEqual(y2.dtype, other_dtype)
self.assertEqual(y2, y1.type(other_dtype))
ly2 = torch.sparse.log_softmax(x, dim, dtype=other_dtype)
self.assertEqual(ly2.dtype, other_dtype)
self.assertEqual(ly2, ly1.type(other_dtype))
test_op(1, 10, [3], coalesced)
test_op(1, 10, [2, 3], coalesced)
test_op(1, 10, [3, 2], coalesced)
test_op(2, 10, [2, 3, 4], coalesced)
test_op(2, 10, [3, 4], coalesced)
test_op(2, 5, [5, 4], coalesced)
test_op(2, 10, [3, 4, 2], coalesced)
test_op(3, 10, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2, 3], coalesced)
test_op(3, 100, [3, 4, 2, 3, 5, 2], coalesced)
test_op(4, 100, [3, 4, 2, 3, 5, 2], coalesced)
|
def test_op(sparse_dims, nnz, with_size, coalesced):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
def sparse_log(x):
return torch.sparse_coo_tensor(x._indices(), x._values().log(),
x.size(), dtype=x.dtype, device=x.device)
# Check dim out of bounds
with self.assertRaisesRegex(IndexError, r"Dimension out of range"):
torch.sparse.softmax(x, x.dim())
with self.assertRaisesRegex(IndexError, r"Dimension out of range"):
torch.sparse.softmax(x, -x.dim() - 1)
for dim in range(x.dim()):
# Check sparse softmax definition
# check Python sparse softmax
y = sparse_softmax(x, dim)
r1 = softmax_to_dense(x, dim)
r2 = y.to_dense()
self.assertEqual(r1, r2)
# check C++ sparse softmax
for d in (dim, dim - x.dim()):
y1 = torch.sparse.softmax(x, d)
self.assertEqual(y, y1)
# check C++ sparse log_softmax
ly1 = torch.sparse.log_softmax(x, d)
self.assertEqual(ly1, sparse_log(y1))
# Check autograd support on sparse softmax
# check softmax Jacobian definition for dense input
x1 = to_dense(x, fill_value=float('-inf'))
J = softmax_jacobian_analytic(x1, dim)
assert J.shape[0] == x.shape[dim]
assert J.shape[dim + 1] == x.shape[dim]
# check softmax Jacobian from autograd, dense input
J2 = softmax_jacobian_autograd(x1, dim)
self.assertEqual(J, J2)
# check softmax Jacobian from autograd, sparse input
J3 = softmax_jacobian_autograd(x, dim)
self.assertEqual(J, J3)
'''
y = softmax(x, dim)
z = log(y) = log_softmax(x, dim)
Dy/Dx = J
Dz/Dx = Dz/Dy Dy/Dx = 1/y * J
=> J = J_log * y
'''
# log_softmax Jacobian from autograd, dense input
J2_log = softmax_jacobian_autograd(x1, dim, log=True)
# log_softmax Jacobian from autograd, sparse input
J3_log = softmax_jacobian_autograd(x, dim, log=True)
J = J.transpose(0, dim + 1)
J2_log = J2_log.transpose(0, dim + 1)
J3_log = J3_log.transpose(0, dim + 1)
self.assertEqual(J, J2_log * r1)
self.assertEqual(J, J3_log * r1)
if dim == 0:
# check dtype argument
other_dtype = torch.float32
y2 = torch.sparse.softmax(x, dim, dtype=other_dtype)
self.assertEqual(y2.dtype, other_dtype)
self.assertEqual(y2, y1.type(other_dtype))
ly2 = torch.sparse.log_softmax(x, dim, dtype=other_dtype)
self.assertEqual(ly2.dtype, other_dtype)
self.assertEqual(ly2, ly1.type(other_dtype))
test_op(1, 10, [3], coalesced)
test_op(1, 10, [2, 3], coalesced)
test_op(1, 10, [3, 2], coalesced)
test_op(2, 10, [2, 3, 4], coalesced)
test_op(2, 10, [3, 4], coalesced)
test_op(2, 5, [5, 4], coalesced)
test_op(2, 10, [3, 4, 2], coalesced)
test_op(3, 10, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2, 3], coalesced)
test_op(3, 100, [3, 4, 2, 3, 5, 2], coalesced)
test_op(4, 100, [3, 4, 2, 3, 5, 2], coalesced)
|
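The updated test above additionally covers dimension wrapping and out-of-range dimensions. A minimal sketch (assuming a small 2-D COO input) of the behaviour it asserts:

import torch

i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([0.5, 1.5, 2.5])
x = torch.sparse_coo_tensor(i, v, (2, 3)).coalesce()

# Negative dims are wrapped exactly as for dense tensors: -1 is the last dim.
y_pos = torch.sparse.softmax(x, x.dim() - 1)
y_neg = torch.sparse.softmax(x, -1)
print(torch.equal(y_pos.to_dense(), y_neg.to_dense()))  # True

# Dims outside [-x.dim(), x.dim() - 1] raise IndexError.
try:
    torch.sparse.softmax(x, x.dim())
except IndexError as e:
    print(type(e).__name__, e)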
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
sparse_log
|
def sparse_log(x):
return torch.sparse_coo_tensor(x._indices(), x._values().log(),
x.size(), dtype=x.dtype, device=x.device)
for dim in range(x.sparse_dim() + x.dense_dim()):
# Check sparse softmax definition
# check Python sparse softmax
y = sparse_softmax(x, dim)
r1 = softmax_to_dense(x, dim)
r2 = y.to_dense()
self.assertEqual(r1, r2)
# check C++ sparse softmax
y1 = torch.sparse.softmax(x, dim)
self.assertEqual(y, y1)
# check C++ sparse log_softmax
ly1 = torch.sparse.log_softmax(x, dim)
self.assertEqual(ly1, sparse_log(y1))
# Check autograd support on sparse softmax
# check softmax Jacobian definition for dense input
x1 = to_dense(x, fill_value=float('-inf'))
J = softmax_jacobian_analytic(x1, dim)
assert J.shape[0] == x.shape[dim]
assert J.shape[dim + 1] == x.shape[dim]
# check softmax Jacobian from autograd, dense input
J2 = softmax_jacobian_autograd(x1, dim)
self.assertEqual(J, J2)
# check softmax Jacobian from autograd, sparse input
J3 = softmax_jacobian_autograd(x, dim)
self.assertEqual(J, J3)
'''
y = softmax(x, dim)
z = log(y) = log_softmax(x, dim)
Dy/Dx = J
Dz/Dx = Dz/Dy Dy/Dx = 1/y * J
=> J = J_log * y
'''
# log_softmax Jacobian from autograd, dense input
J2_log = softmax_jacobian_autograd(x1, dim, log=True)
# log_softmax Jacobian from autograd, sparse input
J3_log = softmax_jacobian_autograd(x, dim, log=True)
J = J.transpose(0, dim + 1)
J2_log = J2_log.transpose(0, dim + 1)
J3_log = J3_log.transpose(0, dim + 1)
self.assertEqual(J, J2_log * r1)
self.assertEqual(J, J3_log * r1)
if dim == 0:
# check dtype argument
other_dtype = torch.float32
y2 = torch.sparse.softmax(x, dim, dtype=other_dtype)
self.assertEqual(y2.dtype, other_dtype)
self.assertEqual(y2, y1.type(other_dtype))
ly2 = torch.sparse.log_softmax(x, dim, dtype=other_dtype)
self.assertEqual(ly2.dtype, other_dtype)
self.assertEqual(ly2, ly1.type(other_dtype))
|
def sparse_log(x):
return torch.sparse_coo_tensor(x._indices(), x._values().log(),
x.size(), dtype=x.dtype, device=x.device)
# Check dim out of bounds
with self.assertRaisesRegex(IndexError, r"Dimension out of range"):
torch.sparse.softmax(x, x.dim())
with self.assertRaisesRegex(IndexError, r"Dimension out of range"):
torch.sparse.softmax(x, -x.dim() - 1)
for dim in range(x.dim()):
# Check sparse softmax definition
# check Python sparse softmax
y = sparse_softmax(x, dim)
r1 = softmax_to_dense(x, dim)
r2 = y.to_dense()
self.assertEqual(r1, r2)
# check C++ sparse softmax
for d in (dim, dim - x.dim()):
y1 = torch.sparse.softmax(x, d)
self.assertEqual(y, y1)
# check C++ sparse log_softmax
ly1 = torch.sparse.log_softmax(x, d)
self.assertEqual(ly1, sparse_log(y1))
# Check autograd support on sparse softmax
# check softmax Jacobian definition for dense input
x1 = to_dense(x, fill_value=float('-inf'))
J = softmax_jacobian_analytic(x1, dim)
assert J.shape[0] == x.shape[dim]
assert J.shape[dim + 1] == x.shape[dim]
# check softmax Jacobian from autograd, dense input
J2 = softmax_jacobian_autograd(x1, dim)
self.assertEqual(J, J2)
# check softmax Jacobian from autograd, sparse input
J3 = softmax_jacobian_autograd(x, dim)
self.assertEqual(J, J3)
'''
y = softmax(x, dim)
z = log(y) = log_softmax(x, dim)
Dy/Dx = J
Dz/Dx = Dz/Dy Dy/Dx = 1/y * J
=> J = J_log * y
'''
# log_softmax Jacobian from autograd, dense input
J2_log = softmax_jacobian_autograd(x1, dim, log=True)
# log_softmax Jacobian from autograd, sparse input
J3_log = softmax_jacobian_autograd(x, dim, log=True)
J = J.transpose(0, dim + 1)
J2_log = J2_log.transpose(0, dim + 1)
J3_log = J3_log.transpose(0, dim + 1)
self.assertEqual(J, J2_log * r1)
self.assertEqual(J, J3_log * r1)
if dim == 0:
# check dtype argument
other_dtype = torch.float32
y2 = torch.sparse.softmax(x, dim, dtype=other_dtype)
self.assertEqual(y2.dtype, other_dtype)
self.assertEqual(y2, y1.type(other_dtype))
ly2 = torch.sparse.log_softmax(x, dim, dtype=other_dtype)
self.assertEqual(ly2.dtype, other_dtype)
self.assertEqual(ly2, ly1.type(other_dtype))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_sparse_matmul
|
def test_sparse_matmul(self, device, dtype, coalesced):
"""
This function tests `torch.sparse.mm` when both mat1 and mat2 are sparse tensors.
"""
def ref_sparse_mm(a, b):
return a.to_dense() @ b.to_dense()
def grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b):
def test_grad_dense(a_s, b_s, g_s):
a = a_s.to_dense().detach()
b = b_s.to_dense().detach()
g = g_s.to_dense().detach()
a.requires_grad_(True)
b.requires_grad_(True)
c = a @ b
c.backward(g)
return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())
a, _, _ = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, _, _ = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
a.requires_grad_(True)
b.requires_grad_(True)
c = torch.sparse.mm(a, b)
c2 = c.to_dense().detach()
c2 = torch.rand_like(c2)
g = c2.sparse_mask(c.coalesce())
c.backward(g)
a_grad, b_grad = test_grad_dense(a, b, g)
# We convert grad to dense since dense and sparse mm
# implementations handle materialized zeroes differently.
self.assertEqual(a.grad.to_dense(), a_grad.to_dense())
self.assertEqual(b.grad.to_dense(), b_grad.to_dense())
def test_sparse_matmul(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
# dense implementation
r1 = ref_sparse_mm(a, b)
# cpp implementation
r2 = torch.sparse.mm(a, b)
self.assertEqual(r1, r2.to_dense())
# Check result is truly coalesced
self.assertTrue(r2.is_coalesced() and is_coalesced_indices(r2))
if dtype in [torch.double, torch.cdouble]:
a.requires_grad_(True)
b.requires_grad_(True)
# check autograd support on sparse matmul
def fn(D1, D2):
return torch.sparse.mm(D1, D2).to_dense()
if a.is_cuda:
# For cuda, `nondet_tol` is set with `1e-5`
# This is because cuSparse sometimes returns approximate zero values like `~e-323`
# TODO: Check this cuSparse issue.
# This happens when you do chain multiplication `torch.sparse.mm` operations
gradcheck(fn, (a, b), check_sparse_nnz=True, nondet_tol=1e-5)
else:
gradcheck(fn, (a, b), check_sparse_nnz=True)
grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b)
|
def test_sparse_matmul(self, device, dtype, coalesced):
"""
This function tests `torch.sparse.mm` when both mat1 and mat2 are sparse tensors.
"""
def ref_sparse_mm(a, b):
return a.to_dense() @ b.to_dense()
def grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b):
def test_grad_dense(a_s, b_s, g_s):
a = a_s.to_dense().detach()
b = b_s.to_dense().detach()
g = g_s.to_dense().detach()
a.requires_grad_(True)
b.requires_grad_(True)
c = a @ b
c.backward(g)
return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())
a, _, _ = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, _, _ = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
a.requires_grad_(True)
b.requires_grad_(True)
c = torch.sparse.mm(a, b)
c2 = c.to_dense().detach()
c2 = torch.rand_like(c2)
g = c2.sparse_mask(c.coalesce())
c.backward(g)
a_grad, b_grad = test_grad_dense(a, b, g)
# We convert grad to dense since dense and sparse mm
# implementations handle materialized zeroes differently.
self.assertEqual(a.grad.to_dense(), a_grad.to_dense())
self.assertEqual(b.grad.to_dense(), b_grad.to_dense())
def test_sparse_matmul(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
# dense implementation
r1 = ref_sparse_mm(a, b)
# cpp implementation
r2 = torch.sparse.mm(a, b)
self.assertEqual(r1, r2.to_dense())
# Check result is truly coalesced
self.assertTrue(r2.is_coalesced() and is_coalesced_indices(r2))
if dtype in [torch.double, torch.cdouble]:
a.requires_grad_(True)
b.requires_grad_(True)
# check autograd support on sparse matmul
def fn(D1, D2):
return torch.sparse.mm(D1, D2).to_dense()
if a.is_cuda:
# For cuda, `nondet_tol` is set with `1e-5`
# This is because cuSparse sometimes returns approximate zero values like `~e-323`
# TODO: Check this cuSparse issue.
# This happens when you do chain multiplication `torch.sparse.mm` operations
gradcheck(fn, (a, b), nondet_tol=1e-5, masked=True)
else:
gradcheck(fn, (a, b), masked=True)
grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b)
|
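The gradcheck calls above rely on masked semantics, where only the specified elements of the sparse inputs are perturbed. A minimal sketch under the same assumptions (double precision, batched-grad checking disabled, and a densified output so Jacobians can be compared elementwise); the helper fn here is illustrative and not part of the test suite:

import torch
from torch.autograd import gradcheck

a = torch.eye(3, dtype=torch.float64).to_sparse().requires_grad_(True)
b = (2 * torch.eye(3, dtype=torch.float64)).to_sparse().requires_grad_(True)

def fn(x, y):
    # Densify so gradcheck compares against a strided output tensor.
    return torch.sparse.mm(x, y).to_dense()

print(gradcheck(fn, (a, b), check_batched_grad=False, masked=True))  # True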
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
class TestSparse(TestSparseBase):
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
class TestSparse(TestSparseBase):
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_add_meta
|
def test_add_meta(self, dtype, layout):
device = 'cpu'
index_dtype = torch.int64
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
expected = torch.add(t, t).to(device='meta')
m = t.to(device='meta')
r = torch.add(m, m)
self.assertEqualMeta(r, expected, 0)
|
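A minimal sketch of the behaviour exercised by test_add_meta, assuming a tiny COO input: moving a sparse tensor to the meta device keeps only metadata (shape, dtype, layout), and operations such as torch.add still propagate shape and layout without touching values:

import torch

t = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 0]]),
                            torch.tensor([1.0, 2.0]), (2, 2))
m = t.to(device='meta')
r = torch.add(m, m)
print(r.device, r.layout, r.shape)  # meta torch.sparse_coo torch.Size([2, 2])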
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseMeta(TestCase):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
__len__
|
def __len__(self):
return len(self.sparse_tensors)
|
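A minimal sketch of a dataset that the __len__ above could belong to; the class name SparseListDataset and its fields are hypothetical and only illustrate wrapping pre-built sparse tensors in a torch.utils.data.Dataset:

import torch
from torch.utils.data import Dataset

class SparseListDataset(Dataset):
    # Hypothetical container holding already-constructed sparse tensors.
    def __init__(self, sparse_tensors):
        self.sparse_tensors = list(sparse_tensors)

    def __len__(self):
        return len(self.sparse_tensors)

    def __getitem__(self, index):
        return self.sparse_tensors[index]

ds = SparseListDataset([torch.eye(3).to_sparse() for _ in range(4)])
print(len(ds), ds[0].layout)  # 4 torch.sparse_coo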
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
class _SparseDataset(torch.utils.data.Dataset):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
test_check_sparse_tensor_invariants
|
def test_check_sparse_tensor_invariants(self, layout):
if layout is torch.sparse_coo:
def create_invalid_tensor(check_invariants=None):
shape = (2, 2)
invalid_indices = torch.tensor([[0], [3]]) # column index is out of range
values = torch.tensor([1])
if check_invariants is None:
return torch.sparse_coo_tensor(invalid_indices, values, shape)
else:
return torch.sparse_coo_tensor(invalid_indices, values, shape, check_invariants=check_invariants)
expected_exception_message = 'size is inconsistent with indices: for dim 1, size is 2 but found index 3'
elif layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
def create_invalid_tensor(check_invariants=None):
shape = (2, 2)
compressed_indices = torch.tensor([0, 0, 1])
invalid_plain_indices = torch.tensor([3]) # index is out of range
if layout in {torch.sparse_bsr, torch.sparse_bsc}:
values = torch.tensor([[[1]]])
else:
values = torch.tensor([1])
if check_invariants is None:
return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, layout=layout)
else:
return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, layout=layout,
check_invariants=check_invariants)
if layout in {torch.sparse_csr, torch.sparse_bsr}:
expected_exception_message = r'`0 <= col_indices < ncols` is not satisfied.'
else:
expected_exception_message = r'`0 <= row_indices < nrows` is not satisfied.'
else:
raise NotImplementedError(layout)
# First, consider the case where invariant checks are disabled
# "globally" (read: within the context of this test method
# caller) as defined by the check_sparse_tensor_invariants(False)
# decorator:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Enable the invariant checks in a local context:
with torch.sparse.check_sparse_tensor_invariants():
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Leaving the local context must restore the "global" state of
# the invariant check feature:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Since invariant checks are disabled by default, we can
# create an invalid sparse tensor without raising an
# exception:
r = create_invalid_tensor()
self.assertEqual(r.layout, layout)
# Or, when disabling the invariants check explicitly:
r = create_invalid_tensor(check_invariants=False)
self.assertEqual(r.layout, layout)
# Enabling invariant check via constructor's optional argument
# will raise an exception when sparse tensor invariants are
# violated:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor(check_invariants=True)
# Check that the global invariant check flag has been restored
# after raising the exception above:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Next, consider the case where invariant checks are enabled
# within a local context:
with torch.sparse.check_sparse_tensor_invariants():
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Since invariant checks are now enabled by default, an
# attempt to create an invalid sparse tensor will lead to
# an exception:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor()
# Similarly, when enabling the invariant checks
# explicitly, invalid sparse tensor construction will lead
# to an exception:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor(check_invariants=True)
# However, invariants check can be disabled via
# constructor's optional argument so that the invalid
# tensor is successfully constructed:
r = create_invalid_tensor(check_invariants=False)
self.assertEqual(r.layout, layout)
# Check that the invariant check flag has been restored
# when leaving the constructor:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Double-check restoring the global state when leaving the
# local context:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
|
def test_check_sparse_tensor_invariants(self, layout):
if layout is torch.sparse_coo:
def create_invalid_tensor(check_invariants=None):
shape = (2, 2)
invalid_indices = torch.tensor([[0], [3]]) # column index is out of range
values = torch.tensor([1])
if check_invariants is None:
return torch.sparse_coo_tensor(invalid_indices, values, shape)
else:
return torch.sparse_coo_tensor(invalid_indices, values, shape, check_invariants=check_invariants)
expected_exception_message = 'size is inconsistent with indices: for dim 1, size is 2 but found index 3'
elif layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
def create_invalid_tensor(check_invariants=None):
shape = (2, 2)
compressed_indices = torch.tensor([0, 0, 1])
invalid_plain_indices = torch.tensor([3]) # index is out of range
if layout in {torch.sparse_bsr, torch.sparse_bsc}:
values = torch.tensor([[[1]]])
else:
values = torch.tensor([1])
if check_invariants is None:
return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, layout=layout)
else:
return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, layout=layout,
check_invariants=check_invariants)
if layout in {torch.sparse_csr, torch.sparse_bsr}:
expected_exception_message = r'`0 <= col_indices < ncols` is not satisfied.'
else:
expected_exception_message = r'`0 <= row_indices < nrows` is not satisfied.'
else:
raise NotImplementedError(layout)
# First, consider the case where invariant checks are disabled
# "globally" (read: within the context of this test method
# caller) as defined by the check_sparse_tensor_invariants(False)
# decorator:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Enable the invariant checks in a local context:
with torch.sparse.check_sparse_tensor_invariants():
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Leaving the local context must restore the "global" state of
# the invariant check feature:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Since invariant checks are disabled by default, we can
# create an invalid sparse tensor without raising an
# exception:
r = create_invalid_tensor()
self.assertEqual(r.layout, layout)
# Or, when disabling the invariants check explicitly:
r = create_invalid_tensor(check_invariants=False)
self.assertEqual(r.layout, layout)
# Enabling invariant check via constructor's optional argument
# will raise an exception when sparse tensor invariants are
# violated:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor(check_invariants=True)
# Check that the global invariant check flag has been restored
# after raising the exception above:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Next, consider the case where invariant checks are enabled
# within a local context:
with torch.sparse.check_sparse_tensor_invariants():
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Since invariant checks are now enabled by default, an
# attempt to create an invalid sparse tensor will lead to
# an exception:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor()
# Similarly, when enabling the invariant checks
# explicitly, invalid sparse tensor construction will lead
# to an exception:
with self.assertRaisesRegex(RuntimeError, expected_exception_message):
create_invalid_tensor(check_invariants=True)
# However, invariants check can be disabled via
# constructor's optional argument so that the invalid
# tensor is successfully constructed:
r = create_invalid_tensor(check_invariants=False)
self.assertEqual(r.layout, layout)
# Check that the invariant check flag has been restored
# when leaving the constructor:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Double-check restoring the global state when leaving the
# local context:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Test nesting of pre-defined context managers
check_ctx = torch.sparse.check_sparse_tensor_invariants(True)
no_check_ctx = torch.sparse.check_sparse_tensor_invariants(False)
with check_ctx:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
with no_check_ctx:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
# Test an attempt to re-use an activate context manager instance
check_ctx2 = torch.sparse.check_sparse_tensor_invariants(True)
with check_ctx:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
with no_check_ctx:
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
with self.assertRaisesRegex(RuntimeError, "This context manager instance is already activated."
" Use a different context manager instance for context nesting"):
with check_ctx:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
with check_ctx2:
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertTrue(torch.sparse.check_sparse_tensor_invariants.is_enabled())
self.assertFalse(torch.sparse.check_sparse_tensor_invariants.is_enabled())
|
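A condensed sketch of the ways the invariant checks can be switched on, mirroring what the test verifies (per-call flag, scoped context manager, and the module-level query); the indices below are deliberately invalid so the check fires, and the final assert assumes the checks are disabled globally, as in the test:

import torch

bad_indices = torch.tensor([[0], [3]])  # column index 3 is out of range for shape (2, 2)
values = torch.tensor([1])

# Checks are off by default, so an invalid tensor can be constructed silently.
t = torch.sparse_coo_tensor(bad_indices, values, (2, 2))

# Per-call opt-in raises immediately.
try:
    torch.sparse_coo_tensor(bad_indices, values, (2, 2), check_invariants=True)
except RuntimeError as e:
    print(e)

# Scoped opt-in via the context manager; the flag is restored on exit.
with torch.sparse.check_sparse_tensor_invariants():
    assert torch.sparse.check_sparse_tensor_invariants.is_enabled()
assert not torch.sparse.check_sparse_tensor_invariants.is_enabled()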
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseAny(TestCase):
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
class TestSparseAny(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
specific_constructor
|
def specific_constructor(*args, **kwargs):
if layout is torch.sparse_csr:
return torch.sparse_csr_tensor(*args, **kwargs)
elif layout is torch.sparse_csc:
return torch.sparse_csc_tensor(*args, **kwargs)
elif layout is torch.sparse_bsc:
return torch.sparse_bsc_tensor(*args, **kwargs)
elif layout is torch.sparse_bsr:
return torch.sparse_bsr_tensor(*args, **kwargs)
elif layout is torch.sparse_coo:
return torch.sparse_coo_tensor(*args, **kwargs)
else:
raise NotImplementedError(layout)
|
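A minimal usage sketch for one branch of specific_constructor above, assuming the CSR layout: the layout-specific constructor takes compressed row indices, plain column indices, values and the shape:

import torch

crow_indices = torch.tensor([0, 0, 1])  # row 0 has no entries, row 1 has one
col_indices = torch.tensor([0])
values = torch.tensor([1.0])
t = torch.sparse_csr_tensor(crow_indices, col_indices, values, (2, 2))
print(t.layout)      # torch.sparse_csr
print(t.to_dense())  # tensor([[0., 0.], [1., 0.]])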
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
identity
|
def identity(x):
return x
for func in (torch.Tensor.to_dense,
torch.Tensor.sum,
identity,
torch.Tensor.to_sparse,
torch.Tensor.values,
):
for x in self.generate_simple_inputs(
layout,
device=device,
dtype=torch.float64,
# TODO: fix gh-104868 to enable batched samples:
enable_batch=layout not in sparse_compressed_layouts,
enable_hybrid=not (
layout in sparse_compressed_layouts and (
# FIXME: RuntimeError: sparse_mask(): the
# number of sparse dimensions in `self`
# should match that of the `mask`. Got
# `self.sparse_dim() == 3` !=
# `mask.sparse_dim() == 2
func.__name__ == 'sum'
# FIXME: RuntimeError: expected
# col_indices to be a contiguous tensor
# per batch
or func.__name__ == 'to_sparse'
))):
if layout is torch.sparse_coo and func.__name__ == 'values':
x = x.coalesce()
gradcheck(func, x.requires_grad_(True), masked=masked, fast_mode=fast_mode)
|
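A minimal sketch of the kind of check the loop above performs for a single func, assuming a small coalesced COO input in double precision; masked and fast_mode mirror the keyword arguments forwarded by the loop:

import torch
from torch.autograd import gradcheck

x = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 0]]),
                            torch.tensor([1.0, 2.0], dtype=torch.float64),
                            (2, 2)).coalesce()

# With masked=True only the specified elements of x are perturbed.
print(gradcheck(torch.Tensor.to_dense, x.requires_grad_(True),
                check_batched_grad=False, masked=True, fast_mode=True))  # True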
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
test_constructor_mismatched_pinned_memory
|
def test_constructor_mismatched_pinned_memory(self, device, layout):
"""Test the failure to construct sparse tensor from indices and values
that have different pinning states.
"""
def generic_constructor(*args, **kwargs):
if layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
kwargs.update(layout=layout)
return torch.sparse_compressed_tensor(*args, **kwargs)
elif layout is torch.sparse_coo:
return torch.sparse_coo_tensor(*args, **kwargs)
else:
raise NotImplementedError(layout)
for args, kwargs in self.generate_simple_inputs(
layout, device=device, dtype=torch.float64,
enable_zero_sized=False, # pinning zero-sized tensors is a no-op
enable_batch=False, # TODO: remove after gh-104868 is resolved
output_tensor=False):
# indices are pinned, values is a non-pinned tensor
args1 = (args[0].pin_memory(), *args[1:])
# indices are non-pinned, values is a pinned tensor
args2 = (*args[:-1], args[-1].pin_memory())
with self.assertRaisesRegex(
RuntimeError, r"memory pinning of \w*indices \(=1\) must match memory pinning of values \(=0\)"):
generic_constructor(*args1, **kwargs)
with self.assertRaisesRegex(
RuntimeError, r"memory pinning of \w*indices \(=0\) must match memory pinning of values \(=1\)"):
generic_constructor(*args2, **kwargs)
# e.g., TestSparseUnaryUfuncsCPU and TestSparseUnaryUfuncsCUDA
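# Minimal sketch of the mismatch the test above asserts (illustrative only;
# needs a CUDA-enabled build for pin_memory and a PyTorch version that enforces
# matching pin states in the sparse constructors):
import torch
if torch.cuda.is_available():
    indices = torch.tensor([[0, 1], [1, 0]]).pin_memory()   # pinned
    values = torch.tensor([1.0, 2.0])                        # not pinned
    try:
        torch.sparse_coo_tensor(indices, values, (2, 2))
    except RuntimeError as err:
        print(err)  # expected: pin states of indices and values must match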
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
class TestSparseAny(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
generic_constructor
|
# e.g., TestSparseUnaryUfuncsCPU and TestSparseUnaryUfuncsCUDA
instantiate_device_type_tests(TestSparseUnaryUfuncs, globals(), except_for='meta')
instantiate_device_type_tests(TestSparseMaskedReductions, globals(), except_for='meta')
# e.g., TestSparseCPU and TestSparseCUDA
instantiate_device_type_tests(TestSparse, globals(), except_for='meta')
instantiate_device_type_tests(TestSparseAny, globals(), except_for='meta')
if __name__ == '__main__':
run_tests()
|
def generic_constructor(*args, **kwargs):
if layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
kwargs.update(layout=layout)
return torch.sparse_compressed_tensor(*args, **kwargs)
elif layout is torch.sparse_coo:
return torch.sparse_coo_tensor(*args, **kwargs)
else:
raise NotImplementedError(layout)
if layout is torch.sparse_coo:
constructors = (specific_constructor,)
else:
constructors = (specific_constructor, generic_constructor)
for args, kwargs in self.generate_simple_inputs(
layout, device=device, dtype=torch.float64,
enable_batch=False, # TODO: remove after gh-104868 is resolved
output_tensor=False):
values_offset = 1 if layout is torch.sparse_coo else 2
for cnstr in constructors:
for requires_grad in (False, True):
values = args[values_offset].detach().requires_grad_(requires_grad)
args = (*args[:values_offset], values, *args[values_offset + 1:])
kwargs_ = dict(kwargs)
args_ = args + (kwargs_.pop('size'),)
sparse = cnstr(*args, **kwargs)
self.assertEqual(sparse.requires_grad, requires_grad)
if requires_grad:
for masked in (False, True):
if layout is torch.sparse_coo:
torch.autograd.gradcheck(
lambda i, v: cnstr(i, v, **kwargs).to_dense(masked_grad=masked),
args, masked=masked)
torch.autograd.gradcheck(
lambda i, v, sz: cnstr(i, v, sz, **kwargs_).to_dense(masked_grad=masked),
args_, masked=masked)
else:
if layout in {torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} and 0:
# TODO: remove this if-block after gh-107370 is resolved
continue
torch.autograd.gradcheck(
lambda ci, pi, v: cnstr(ci, pi, v, **kwargs).to_dense(masked_grad=masked),
args, masked=masked)
torch.autograd.gradcheck(
lambda ci, pi, v, sz: cnstr(ci, pi, v, sz, **kwargs_).to_dense(masked_grad=masked),
args_, masked=masked)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_sparse_csr.py
|
_check_cusparse_spgemm_available
|
def _check_cusparse_spgemm_available():
# cusparseSpGEMM was added in 11.0
version = _get_torch_cuda_version()
min_supported_version = (11, 0)
return version >= min_supported_version
|
def _check_cusparse_spgemm_available():
# cusparseSpGEMM was added in 11.0
return not TEST_WITH_ROCM
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_csr.py
|
_check_cusparse_sddmm_available
|
def _check_cusparse_sddmm_available():
version = _get_torch_cuda_version()
# cusparseSDDMM was added in 11.2.1 but we don't have access to patch version
min_supported_version = (11, 3)
return version >= min_supported_version
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
# This should be just an import from test_linalg instead of code duplication
# but https://github.com/pytorch/pytorch/pull/63511#discussion_r733989701
|
def _check_cusparse_sddmm_available():
if TEST_WITH_ROCM:
return True
version = _get_torch_cuda_version()
# cusparseSDDMM was added in 11.2.1 but we don't have access to patch version
min_supported_version = (11, 3)
return version >= min_supported_version
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
# This should be just an import from test_linalg instead of code duplication
# but https://github.com/pytorch/pytorch/pull/63511#discussion_r733989701
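# Illustrative re-implementation of the gating logic above (not the test
# suite's helper): ROCm builds short-circuit to True, CUDA builds compare a
# (major, minor) tuple against the first toolkit release that ships the kernel.
import torch
def _sddmm_available_sketch():
    if torch.version.hip is not None:  # ROCm build
        return True
    if torch.version.cuda is None:     # CPU-only build
        return False
    major, minor = (int(p) for p in torch.version.cuda.split(".")[:2])
    return (major, minor) >= (11, 3)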
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_semi_structured.py
|
forward
|
def forward(self, x):
x = self.linear(x)
x = x.contiguous()
return torch.nn.functional.relu(x)
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
class Model(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
fn
|
def fn(x, e):
y = SparseSemiStructuredTensorCUSPARSELT.prune_dense_static_sort(x)
y = y.t()
return x @ y
# Eager
output = fn(x, e)
output.backward(output)
# Torch compile
output = torch.compile(fn)(x, e)
output.backward(output)
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
setUp
|
def setUp(self):
if len(SEMI_STRUCTURED_SUPPORTED_BACKENDS) == 0:
self.skipTest('semi-structured sparsity has no available backend!')
super().setUp()
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_csr.py
|
is_view_of
|
def is_view_of(base, other):
# a shameless copy of TestViewOps.is_view_of
if ((not other._is_view() or
other is base or
other._base is not base or
base.device != other.device)):
return False
if base.device.type == 'cpu' or base.device.type == 'cuda':
if base.untyped_storage().data_ptr() != other.untyped_storage().data_ptr():
return False
return True
kwargs = dict(device=device, dtype=dtype, index_dtype=index_dtype)
for sparse, dense in zip(self.generate_simple_inputs(layout, **kwargs),
self.generate_simple_inputs(torch.strided, **kwargs)):
if layout in {torch.sparse_csr, torch.sparse_bsr}:
n_batchdim = sparse.crow_indices().ndim - 1
elif layout in {torch.sparse_csc, torch.sparse_bsc}:
n_batchdim = sparse.ccol_indices().ndim - 1
else:
assert 0 # unreachable
self.assertEqual(sparse, dense)
for dim in range(sparse.ndim):
if sparse.shape[dim] == 0:
with self.assertRaisesRegex(IndexError, "index 0 out of range for tensor of size"):
torch.select_copy(sparse, dim, 0)
with self.assertRaisesRegex(IndexError, "index 0 out of range for tensor of size"):
torch.select_copy(dense, dim, 0)
elif n_batchdim and dim >= n_batchdim and dim < n_batchdim + 2:
with self.assertRaisesRegex(
RuntimeError,
"selecting sparse dimensions is not implemented for batched sparse compressed tensors"):
torch.select_copy(sparse, dim, 0)
else:
for index in {0, sparse.shape[dim] // 2, sparse.shape[dim] - 1}:
dense_select = torch.select_copy(dense, dim, index)
sparse_select = torch.select_copy(sparse, dim, index)
self.assertEqual(sparse_select, dense_select)
self.assertFalse(is_view_of(sparse_select.values(), sparse.values()))
|
def is_view_of(base, other):
# a shameless copy of TestViewOps.is_view_of
if (
not other._is_view() or
other is base or
other._base is not base or
base.device != other.device
):
return False
if base.device.type in ('cpu', 'cuda'):
if base.untyped_storage().data_ptr() != other.untyped_storage().data_ptr():
return False
return True
kwargs = dict(device=device, dtype=dtype, index_dtype=index_dtype)
for sparse, dense in zip(self.generate_simple_inputs(layout, **kwargs),
self.generate_simple_inputs(torch.strided, **kwargs)):
if layout in {torch.sparse_csr, torch.sparse_bsr}:
n_batchdim = sparse.crow_indices().ndim - 1
elif layout in {torch.sparse_csc, torch.sparse_bsc}:
n_batchdim = sparse.ccol_indices().ndim - 1
else:
assert 0 # unreachable
self.assertEqual(sparse, dense)
for dim in range(sparse.ndim):
if sparse.shape[dim] == 0:
with self.assertRaisesRegex(IndexError, "index 0 out of range for tensor of size"):
torch.select_copy(sparse, dim, 0)
with self.assertRaisesRegex(IndexError, "index 0 out of range for tensor of size"):
torch.select_copy(dense, dim, 0)
elif n_batchdim and dim >= n_batchdim and dim < n_batchdim + 2:
with self.assertRaisesRegex(
RuntimeError,
"selecting sparse dimensions is not supported for batched sparse compressed tensors"):
torch.select_copy(sparse, dim, 0)
else:
for index in {0, sparse.shape[dim] // 2, sparse.shape[dim] - 1}:
dense_select = torch.select_copy(dense, dim, index)
sparse_select = torch.select_copy(sparse, dim, index)
self.assertEqual(sparse_select, dense_select)
self.assertFalse(is_view_of(sparse_select.values(), sparse.values()))
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_csr.py
|
_npref_block_addmm_addmv
|
def _npref_block_addmm_addmv(c, a, b, alpha, beta):
return alpha * (a @ b) + beta * c
class TestSparseCSR(TestCase):
def test_csr_stride(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, "Sparse CSR tensors do not have strides"):
a.stride()
with self.assertRaisesRegex(RuntimeError, "Sparse CSR tensors do not have strides"):
a.stride(-1)
def test_csr_storage(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, "Cannot access storage of SparseCsrTensorImpl"):
a.storage()
def test_csr_is_contiguous(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, "Sparse CSR tensors do not have is_contiguous"):
a.is_contiguous()
def test_csr_double_to_sparse_csr(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
a.to_sparse_csr().to_sparse_csr()
@all_sparse_compressed_layouts()
@parametrize("index_dtype", [torch.int32, torch.int64])
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_select(self, device, dtype, index_dtype, layout):
compressed_indices_mth = {
torch.sparse_csr: torch.Tensor.crow_indices,
torch.sparse_bsr: torch.Tensor.crow_indices,
torch.sparse_csc: torch.Tensor.ccol_indices,
torch.sparse_bsc: torch.Tensor.ccol_indices,
}[layout]
plain_indices_mth = {
torch.sparse_csr: torch.Tensor.col_indices,
torch.sparse_bsr: torch.Tensor.col_indices,
torch.sparse_csc: torch.Tensor.row_indices,
torch.sparse_bsc: torch.Tensor.row_indices,
}[layout]
create_tensor_mth = {
torch.sparse_csr: torch.sparse_csr_tensor,
torch.sparse_bsr: torch.sparse_bsr_tensor,
torch.sparse_csc: torch.sparse_csc_tensor,
torch.sparse_bsc: torch.sparse_bsc_tensor,
}[layout]
shape = (2, 3, 6, 10)
nnz = 6
blocksize = (2, 2) if layout in {torch.sparse_bsr, torch.sparse_bsc} else ()
sparse = self.genSparseCompressedTensor(
shape, nnz, device=device, layout=layout, dtype=dtype, index_dtype=index_dtype, blocksize=blocksize)
comp_indices = compressed_indices_mth(sparse)
plain_indices = plain_indices_mth(sparse)
values = sparse.values()
# select from batch dimensions
sparse_selected12 = sparse.select(1, 2)
expected_sparse_selected12 = create_tensor_mth(comp_indices.select(1, 2).contiguous(),
plain_indices.select(1, 2).contiguous(),
values.select(1, 2).contiguous(),
size=(2, 6, 10),
dtype=dtype,
device=device)
self.assertEqual(expected_sparse_selected12, sparse_selected12)
# selecting rows/col with batch dims not allowed
sparse_non_batched = sparse[0, 0]
# select from sparse dimensions
for select_args in [(0, 0), (1, 1)]:
sparse_selected = sparse_non_batched.select(*select_args)
dense_selected = sparse_non_batched.to_dense().select(*select_args)
self.assertEqual(dense_selected, sparse_selected)
self.assertEqual(sparse[0, 0, 0, 0], sparse.to_dense()[0, 0, 0, 0])
# assigning to sparse through indexing is disabled
with self.assertRaisesRegex(TypeError, "Cannot assign to a sparse tensor"):
sparse[0, 0, 0, 0] = 99.0
# select from sparse dimensions without removing batch dims
msg = "selecting sparse dimensions is not implemented for batched sparse compressed tensors."
with self.assertRaisesRegex(RuntimeError, msg):
sparse.select(-2, 0)
with self.assertRaisesRegex(RuntimeError, msg):
sparse.select(-1, 0)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_resize(self, device, dtype):
def numel(tensor):
r = 1
for s in tensor.shape:
r *= s
return r
batch_shapes = [(), (2,), (2, 3)]
for index_dtype, b in zip([torch.int32, torch.int64], batch_shapes):
shape = (*b, 2, 3)
nnz = 6
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
self.assertEqual(a.numel(), numel(a))
new_shape = (*b, 4, 5)
a.resize_(new_shape)
self.assertEqual(a.shape, new_shape)
# resize to larger shape doesn't add specified elements
self.assertEqual(a._nnz(), nnz)
self.assertEqual(a.numel(), numel(a))
new_shape = (*b, 1, 5)
a.resize_(new_shape)
self.assertEqual(a.shape, new_shape)
# resize to smaller shape trims specified elements
self.assertEqual(a._nnz(), 5)
self.assertEqual(a.numel(), numel(a))
# trim batched dimensions
a.resize_(new_shape[-2], new_shape[-1])
self.assertEqual(a.shape, (new_shape[-2], new_shape[-1]))
self.assertEqual(a._nnz(), 5)
self.assertEqual(a.numel(), numel(a))
@skipMeta
@dtypes(torch.float, torch.bool)
@all_sparse_compressed_layouts()
def test_resize_as_sparse_compressed(self, device, dtype, layout):
def _check_resize_b_as_a(b, a):
br = b.clone()
br.resize_as_sparse_(a)
# shape is inherited from a
self.assertEqual(a.shape, br.shape)
# other metadata is not affected
self.assertEqual(b.layout, br.layout)
self.assertEqual(b.device, br.device)
self.assertEqual(b.dtype, br.dtype)
def _get_compressed_plain_inds(t):
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[t.layout]
return compressed_indices_mth(t), plain_indices_mth(t)
br_compressed_indices, br_plain_indices = _get_compressed_plain_inds(br)
br_values = br.values()
b_compressed_indices, b_plain_indices = _get_compressed_plain_inds(b)
a_compressed_indices, a_plain_indices = _get_compressed_plain_inds(a)
self.assertEqual(a_plain_indices.shape, br_plain_indices.shape)
self.assertEqual(a_compressed_indices.shape, br_compressed_indices.shape)
# We don't check the content of br_plain_indices and br_compressed_indices
# because it is not well-defined (the content depends on the original
# shape of `b` that `resize_as` ought to discard) nor needed (the
# subsequent operation likely updates the indices and values of `b` anyway).
# the device/dtype of indices should always be unaffected
self.assertEqual(b_plain_indices.dtype, br_plain_indices.dtype)
self.assertEqual(b_plain_indices.device, br_plain_indices.device)
self.assertEqual(b_compressed_indices.dtype, br_compressed_indices.dtype)
self.assertEqual(b_compressed_indices.device, br_compressed_indices.device)
# values are generated empty, shape is updated
self.assertEqual(a.values().shape, br_values.shape)
# the device/dtype of indices should always be unaffected
b_values = b.values()
self.assertEqual(b_values.dtype, br_values.dtype)
self.assertEqual(b_values.device, br_values.device)
# nnz will be picked up from a via new shape of values
self.assertEqual(a._nnz(), br._nnz())
# post resize the invariants of the layout are respected
torch._validate_sparse_compressed_tensor_args(br_compressed_indices, br_plain_indices, br_values, br.shape,
br.layout)
block_sparse = layout in (torch.sparse_bsr, torch.sparse_bsc)
shape = (2, 1, 6, 4)
nnz = 4
blocksize = (2, 1) if block_sparse else ()
for index_dtype in [torch.int32, torch.int64]:
a = self.genSparseCompressedTensor(shape,
layout=layout,
device=device,
index_dtype=index_dtype,
dtype=dtype,
nnz=nnz,
blocksize=blocksize)
# same size, resize should not trigger
b = self.genSparseCompressedTensor(shape,
layout=layout,
device=device,
index_dtype=index_dtype,
dtype=dtype,
nnz=nnz,
blocksize=blocksize)
# This test will not always trigger a resize, if the layouts are the same nothing should happen to b.
# The invariants of the function as checked should still hold
_check_resize_b_as_a(b, a)
# same ndim, but bigger, more nnz, different dtype, different blocksize if blocked
b = self.genSparseCompressedTensor(tuple(s * 2 for s in shape),
layout=layout,
device=device,
dtype=torch.chalf,
index_dtype=torch.int64 if index_dtype == torch.int32 else torch.int32,
nnz=nnz * 2,
blocksize=tuple(2 * bi for bi in blocksize))
_check_resize_b_as_a(b, a)
# different device, only check on cuda pass as we know we are testing in an environment
# that has multiple devices
# TODO: .cpu() does not seem to work correctly for sparse. Causes a call to `copy_` which
# complains about incompatible nnz between src and self?
if torch.device(device).type == 'cuda' and (layout not in (torch.sparse_bsc, torch.sparse_bsr)):
a_cpu = self.genSparseCompressedTensor(shape,
layout=layout,
device='cpu',
index_dtype=index_dtype,
dtype=dtype,
nnz=nnz,
blocksize=blocksize)
_check_resize_b_as_a(b, a)
# error on a strided
a_strided = a.to_dense()
with self.assertRaisesRegex(
RuntimeError, r'resize_as_sparse_compressed_: src expected sparse compressed tensor layout'):
b.resize_as_sparse_(a_strided)
# error on b strided
b_strided = b.to_dense()
with self.assertRaisesRegex(
RuntimeError, r'resize_as_sparse_compressed_: self expected sparse compressed tensor layout'):
b_strided.resize_as_sparse_(a)
# error if layout does not match, transpose induces layout flip
with self.assertRaisesRegex(RuntimeError,
r"resize_as_sparse_compressed_tensor_: self and src must have the same layout"):
b.transpose(-2, -1).resize_as_sparse_(a)
with self.assertRaisesRegex(RuntimeError,
r"resize_as_sparse_compressed_tensor_: self and src must have the same layout"):
b.resize_as_sparse_(a.transpose(-2, -1))
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_resize_errors(self, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
shape = (2, 3)
nnz = 6
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "torch.resize_: Only batched sparse CSR matrices are supported"):
new_shape = (4,)
a.resize_(new_shape)
# resizing of columns to smaller size is not implemented
with self.assertRaisesRegex(
RuntimeError,
"torch.resize_: Resizing columns of sparse CSR tensors to a smaller value is not supported.",
):
new_shape = (2, 2)
a.resize_(new_shape)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_csr_from_dense(self, device, dtype):
dense = torch.tensor([[4, 5, 0], [0, 0, 0], [1, 0, 0]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 2, 2, 3], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 0], dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([4, 5, 1], dtype=dtype), sparse.values())
dense = torch.tensor([[0, 0, 0], [0, 0, 1], [1, 0, 0]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 0, 1, 2], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([2, 0], dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([1, 1], dtype=dtype), sparse.values())
dense = torch.tensor([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 3, 6, 9], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 2] * 3, dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([2] * 9, dtype=dtype), sparse.values())
def _test_sparse_compressed_to_dense(self, device, dtype, layout):
compressed_format_str = str(layout)[-3:]
def to_compressed(t):
return getattr(t, f"to_sparse_{compressed_format_str}")()
def compressed_constructor(*input, **kwargs):
constructor = getattr(torch, f"sparse_{compressed_format_str}_tensor")
return constructor(*input, **kwargs)
def get_dense_shape(shape, batch_ndim):
if layout is torch.sparse_csc:
compressed_dims_slice = slice(batch_ndim + 1, batch_ndim - 1, -1)
else:
compressed_dims_slice = slice(batch_ndim, batch_ndim + 2)
return shape[:batch_ndim] + shape[compressed_dims_slice] + shape[batch_ndim + 2:]
def transpose(t, batch_ndim):
if layout is torch.sparse_csc:
return t.transpose(batch_ndim, batch_ndim + 1)
return t
mn = [5, 2, 0]
for (m, n) in itertools.product(mn, mn):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
sparse = to_compressed(dense)
self.assertEqual(sparse.to_dense(), dense)
batch_shape = (2, 3)
compressed_indices = torch.tensor([0, 3, 5], device=device).repeat(6, 1).reshape(*batch_shape, -1)
plain_indices = torch.tensor([0, 1, 2, 0, 1], device=device).repeat(6, 1).reshape(*batch_shape, -1)
values = torch.tensor([1, 2, 1, 3, 4], device=device, dtype=dtype).repeat(6, 1).reshape(*batch_shape, -1)
sparse = compressed_constructor(compressed_indices, plain_indices, values, dtype=dtype, device=device)
dense_shape = get_dense_shape(sparse.shape, len(batch_shape))
dense = torch.tensor([[1, 2, 1], [3, 4, 0]], dtype=dtype, device=device).repeat(6, 1).reshape(dense_shape)
self.assertEqual(sparse.to_dense(), transpose(dense, len(batch_shape)))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_csr_to_dense(self, device, dtype):
self._test_sparse_compressed_to_dense(device, dtype, torch.sparse_csr)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_csc_to_dense(self, device, dtype):
self._test_sparse_compressed_to_dense(device, dtype, torch.sparse_csc)
@skipMeta
@skipCPUIfNoMklSparse
@coalescedonoff
@dtypes(torch.double)
def test_coo_to_csr_convert(self, device, dtype, coalesced):
with self.assertRaisesRegex(RuntimeError, "Input is supposed to be a vector"):
torch._convert_indices_from_coo_to_csr(
torch.randint(100, (5, 5), device=device),
size=100)
size = (5, 5)
sparse_dim = 2
nnz = 10
sparse_coo, _, _ = self.genSparseTensor(size, sparse_dim, nnz, coalesced, device, dtype)
sparse_csr = sparse_coo.to_sparse_csr()
self.assertTrue(sparse_csr.is_sparse_csr)
self.assertEqual(sparse_csr.to_dense(), sparse_coo.to_dense())
vec = torch.randn((5, 1), dtype=dtype, device=device)
coo_product = sparse_coo.matmul(vec)
csr_product = sparse_csr.matmul(vec)
self.assertEqual(coo_product, csr_product)
vec = torch.randn((100, 1), dtype=dtype, device=device)
index = torch.tensor([
[1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
[92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
], dtype=torch.int32)
values = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)
coo = torch.sparse_coo_tensor(index, values, torch.Size([100, 100]), dtype=dtype, device=device)
csr = coo.to_sparse_csr()
self.assertEqual(coo.matmul(vec), csr.matmul(vec))
col_indices = torch.tensor([
31, 92, 65, 50, 34, 62, 22, 56, 74, 89
], dtype=torch.int64, device=device)
self.assertEqual(csr.col_indices(), col_indices)
values = torch.tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7], dtype=dtype, device=device)
self.assertEqual(csr.values(), values)
@parametrize("blocksize", [2, 4])
@dtypes((torch.double, torch.int32), (torch.double, torch.int64))
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@skipMeta
def test_csr_to_block_csr(self, device, dtypes, blocksize):
for shape in [(24, 24), (12, 24)]:
dtype, index_dtype = dtypes
m, k = shape
nnz = random.randint(0, m * k)
t = self.genSparseCSRTensor((m * blocksize, k * blocksize), nnz, dtype=dtype,
device=device, index_dtype=index_dtype)
st = sp.csr_matrix((t.values().cpu(), t.col_indices().cpu(), t.crow_indices().cpu()), shape=tuple(t.size()))
block_t = t.to_sparse_bsr((blocksize, blocksize))
self.assertEqual(block_t.values().dim(), 3)
self.assertTrue(block_t.layout == torch.sparse_bsr)
block_st = st.tobsr(blocksize=(blocksize, blocksize))
block_st.sort_indices()
self.assertEqual(block_t.values().cpu(), block_st.data)
self.assertEqual(block_t.col_indices().cpu(), torch.tensor(block_st.indices).to(index_dtype))
self.assertEqual(block_t.crow_indices().cpu(), torch.tensor(block_st.indptr).to(index_dtype))
@dtypes(torch.double)
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_csr_to_block_csr_errors(self, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
nnz = 15
t = self.genSparseCSRTensor((16, 16), nnz, dtype=dtype,
device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, r"size \(16, 16\) with block size \(5, 5\)"):
block_t = t.to_sparse_bsr((5, 5))
# TODO: Support auto generation of device check for sparse tensors
# See: https://github.com/pytorch/pytorch/issues/59058
@onlyCUDA
@dtypes(torch.double)
def test_matmul_device_mismatch(self, device, dtype):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
for s, m1, m2 in itertools.product((cpu, cuda), repeat=3):
csr = m1.to_sparse()
if s.device == csr.device == m2.device:
torch.addmm(s, csr, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, csr, m2)
@skipCPUIfNoMklSparse
@skipCUDAIfNoSparseGeneric
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater else [],
*[torch.bfloat16] if SM80OrLater else []))
def test_csr_matvec(self, device, dtype):
if TEST_WITH_ROCM and (dtype == torch.half or dtype == torch.bfloat16):
self.skipTest("ROCm doesn't work with half dtypes correctly.")
side = 100
for index_dtype in [torch.int32, torch.int64]:
csr = self.genSparseCSRTensor((side, side), 1000, device=device, dtype=dtype, index_dtype=index_dtype)
vec = torch.randn(side, dtype=dtype, device=device)
res = csr.matmul(vec)
expected = csr.to_dense().matmul(vec)
self.assertEqual(res, expected)
bad_vec = torch.randn(side + 10, dtype=dtype, device=device)
err_msg = "size mismatch, got"
with self.assertRaisesRegex(RuntimeError, err_msg):
csr.matmul(bad_vec)
@onlyCUDA
    # note: the test passes on CUDA when ROCm is not available:
@skipCUDAIfRocmVersionLessThan((5, 2))
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_baddbmm(self, device, dtype):
# TODO: disable the invariant checks within torch.baddbmm that
# constructs unconventional csr tensors leading to
# RuntimeError: tensor dimensionality must be sum of batch,
# base, and dense dimensionalities (=0 + 2 + 0) but got 3
# when invariant checking is enabled. When done, undecorate run_test.
@torch.sparse.check_sparse_tensor_invariants(enable=False)
def run_test(c, a, a_batched, b, op_b=False, op_out=False, *, dtype=None, device=None):
alpha = complex(random.random(), random.random()) if dtype.is_complex else random.random()
beta = complex(random.random(), random.random()) if dtype.is_complex else random.random()
b = b.mH if (op_b and a.shape == b.shape) else b
actual = torch.baddbmm(c, a_batched, b, alpha=alpha, beta=beta)
out = torch.empty_like(c.mH if op_out and a.shape == b.shape else c)
torch.baddbmm(c, a_batched, b, alpha=alpha, beta=beta, out=out)
expected = [torch.addmm(c[i], a, b[i], alpha=alpha, beta=beta) for i in range(c.shape[0])]
expected = torch.stack(expected, 0)
self.assertEqual(actual, out)
self.assertEqual(actual, expected)
for index_dtype in [torch.int32, torch.int64]:
for (m, n, k), batch_size, noncontiguous in zip(itertools.product([2, 5], repeat=3), [1, 3], [True, False]):
nnz = random.randint(0, m * k)
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
# a_batched is a regular CSR tensor but with a batch dimension in the shape
a_batched = torch.sparse_csr_tensor(
a.crow_indices(), a.col_indices(), a.values(), (batch_size, m, k), check_invariants=False)
b = make_tensor((batch_size, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
c = make_tensor((batch_size, m, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
for op_b, op_out in itertools.product([True, False], repeat=2):
run_test(c, a, a_batched, b, op_b, op_out, dtype=dtype, device=device)
@onlyCUDA
@unittest.skipIf(TEST_WITH_ROCM, "Only CUDA 11+ is supported")
@skipCUDAIfNoSparseGeneric
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_bmm(self, device, dtype):
def run_test(a, a_batched, b, op_b=False, op_out=False, *, dtype=None, device=None):
b = b.mH if (op_b and a.shape == b.shape) else b
actual = torch.bmm(a_batched, b)
out = torch.empty_like(actual.mH if op_out and a.shape == b.shape else actual)
torch.bmm(a_batched, b, out=out)
expected = [torch.mm(a, b[i]) for i in range(b.shape[0])]
expected = torch.stack(expected, 0)
self.assertEqual(actual, out)
self.assertEqual(actual, expected)
for index_dtype in [torch.int32, torch.int64]:
for (m, n, k), batch_size, noncontiguous in zip(itertools.product([2, 5], repeat=3), [1, 3], [True, False]):
nnz = random.randint(0, m * k)
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
# a_batched is a regular CSR tensor but with a batch
# dimension in the shape. It is unorthodox in PyTorch
# to represent a batch sparse tensor in this way,
# hence checking the tensor invariants is locally
# turned off.
a_batched = torch.sparse_csr_tensor(
a.crow_indices(), a.col_indices(), a.values(), (batch_size, m, k), check_invariants=False)
b = make_tensor((batch_size, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
for op_b, op_out in itertools.product([True, False], repeat=2):
run_test(a, a_batched, b, op_b, op_out, dtype=dtype, device=device)
def run_test_block_addmm_addmv(self,
addmv_addmm,
c,
a,
b,
op_b=False,
op_out=False,
*,
dtype=None,
device=None,
ref=_npref_block_addmm_addmv):
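# Helper: compares a block-sparse addmm/addmv call against the given reference,
# using random alpha/beta and also exercising the out= variant and, optionally,
# conjugate-transposed operands.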
alpha = complex(random.random(), random.random()) if dtype.is_complex else random.random()
beta = complex(random.random(), random.random()) if dtype.is_complex else random.random()
b = b.mH if (op_b and a.shape == b.shape) else b
actual = addmv_addmm(c, a, b, alpha=alpha, beta=beta)
out = torch.empty_like(c.mH if op_out and a.shape == b.shape else c)
addmv_addmm(c, a, b, alpha=alpha, beta=beta, out=out)
expected = ref(c, a, b, alpha, beta)
self.assertEqual(actual, out)
self.assertEqual(actual, expected)
# TODO: block_size 1 is broken
@parametrize("block_size", [2, 3])
@parametrize("index_dtype", [torch.int32, torch.int64])
@parametrize("noncontiguous", [True, False])
@skipCPUIfNoMklSparse
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater else [],
*[torch.bfloat16] if SM80OrLater else []))
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-5, torch.complex128: 1e-5,
torch.float16: 1e-3, torch.bfloat16: 1e-3})
def test_block_addmm(self, device, dtype, index_dtype, block_size, noncontiguous):
def make_transposed_addmm_op(f):
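# The wrapped op evaluates the transposed result via the identity
# (beta * c + alpha * a @ b)^T == beta * c^T + alpha * b^T @ a^T.
# Both the tested op and the reference are wrapped, so results stay comparable.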
def tt(t):
if isinstance(t, torch.Tensor):
return t.transpose(-2, -1)
else:
# assume numpy/scipy spmatrix
return t.transpose()
@functools.wraps(f)
def wrapper(c, a, b, alpha=None, beta=None, out=None):
if out is not None:
# the ref takes no out kwarg
assert isinstance(out, torch.Tensor)
# transpose in place to propagate out to the checking context
out.transpose_(-2, -1)
return f(tt(c), tt(b), tt(a), alpha=alpha, beta=beta, out=out)
else:
return f(tt(c), tt(b), tt(a), alpha=alpha, beta=beta)
return wrapper
def ref_sp_numpy(c, a, b, alpha=None, beta=None, out=None):
def prep_input(t):
def to_sp_block_compressed(t):
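# scipy has no native BSC container, so a BSC tensor is represented as the
# transpose of a bsr_matrix built from the transposed tensor.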
if t.layout is torch.sparse_bsc:
tt = t.transpose(-1, -2)
else:
tt = t
t_sp_bsr = sp.bsr_matrix(
(
tt.values().cpu().numpy(),
tt.col_indices().cpu().numpy(),
tt.crow_indices().cpu().numpy(),
),
shape=tt.shape,
)
if t.layout is torch.sparse_bsc:
return t_sp_bsr.transpose()
else:
return t_sp_bsr
if t.layout is not torch.strided:
return to_sp_block_compressed(t)
else:
return t.cpu().resolve_conj().numpy()
res = _npref_block_addmm_addmv(
*map(lambda t: prep_input(t), (c, a, b)),
alpha,
beta
)
if out is not None:
out.copy_(res)
return out
else:
return res
def ref_half_bfloat16(c, a, b, alpha=None, beta=None, out=None):
res = alpha * (a.to_dense().to(torch.float32) @ b.to_dense().to(torch.float32)).to(a.dtype) + beta * c
if out is not None:
out.copy_(res)
return out
else:
return res
if dtype in (torch.half, torch.bfloat16):
ref = ref_half_bfloat16
else:
ref = ref_sp_numpy
for (m, n, k) in itertools.product([2, 5], repeat=3):
nnz = random.randint(0, m * k)
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
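# Reuse the CSR index structure and attach (nnz, block_size, block_size) value
# blocks to assemble the BSR operand; blocks are made column-major (via mT)
# when noncontiguous inputs are requested.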
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data
a = torch.sparse_bsr_tensor(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, k * block_size), check_invariants=False)
b = make_tensor((k * block_size, n * block_size), dtype=dtype, device=device, noncontiguous=noncontiguous)
c = make_tensor((m * block_size, n * block_size), dtype=dtype, device=device, noncontiguous=noncontiguous)
for op_b, op_out in itertools.product([True, False], repeat=2):
self.run_test_block_addmm_addmv(torch.addmm, c, a, b, op_b, op_out, dtype=dtype, device=device, ref=ref)
self.run_test_block_addmm_addmv(make_transposed_addmm_op(torch.addmm),
c,
a,
b,
op_b,
op_out,
dtype=dtype,
device=device,
ref=make_transposed_addmm_op(ref))
@parametrize("block_size", [2, 3])
@parametrize("index_dtype", [torch.int32, torch.int64])
@parametrize("noncontiguous", [True, False])
@skipCPUIfNoMklSparse
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_block_addmv(self, device, dtype, index_dtype, block_size, noncontiguous):
# TODO: Explicitly disable block size 1 support
# if (TEST_WITH_ROCM or not TEST_CUSPARSE_GENERIC) and block_size == 1:
# return
for (m, k) in itertools.product([2, 5], repeat=2):
nnz = random.randint(0, m * k)
if not noncontiguous:
a = self.genSparseCSRTensor((m * block_size, k * block_size), nnz,
dtype=dtype, device=device, index_dtype=index_dtype)
a = a.to_sparse_bsr((block_size, block_size))
else:
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data # Test column-major blocks
a = torch.sparse_bsr_tensor(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, k * block_size), check_invariants=False)
b = make_tensor((k * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
c = make_tensor((m * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
self.run_test_block_addmm_addmv(torch.addmv, c, a, b, dtype=dtype, device=device)
@parametrize("block_size", [2, 3])
@parametrize("index_dtype", [torch.int32, torch.int64])
@parametrize("noncontiguous", [True, False])
@skipCPUIfNoMklSparse
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_block_triangular_solve(self, device, dtype, index_dtype, block_size, noncontiguous):
def run_test(a, b, upper, transpose, unitriangular, op_out):
if unitriangular and self.device_type == 'cpu':
# TODO: When unitriangular=True results are not correct on CPU
return
if not upper and self.device_type == 'cpu':
# TODO: When upper=False some generated inputs might crash on CPU
return
actual = torch.triangular_solve(b, a, upper=upper, unitriangular=unitriangular, transpose=transpose)
actual_X = actual.solution
actual_A_clone = actual.cloned_coefficient
self.assertTrue(actual_A_clone.numel() == 0)
if a._nnz() == 0:
self.assertTrue(actual_X.isnan().all())
return
# TODO: replace with torch method when implemented to_dense() on block sparse tensor
a_bsr = sp.bsr_matrix(
(
a.values().cpu().numpy(),
a.col_indices().cpu().numpy(),
a.crow_indices().cpu().numpy(),
),
shape=a.shape,
)
expected_X, _ = torch.triangular_solve(
b,
torch.tensor(a_bsr.todense(), device=device),
transpose=transpose,
upper=upper,
unitriangular=unitriangular)
if expected_X.isnan().any():
# TODO: zeros on the diagonal are not handled for CPU path
# there's no way to query this info from MKL
if self.device_type == 'cuda' and not TEST_WITH_ROCM:
self.assertTrue(actual_X.isnan().any() or actual_X.isinf().any())
return
self.assertEqual(actual_X, expected_X)
out = torch.empty_like(b.mH if op_out and a.shape == b.shape else b)
torch.triangular_solve(
b, a,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, actual_X)
self.assertEqual(out, expected_X)
for (m, k) in itertools.product([2, 3], [1, 3]):
nnz = random.randint(0, m * m)
if not noncontiguous:
a = self.genSparseCSRTensor((m * block_size, m * block_size), nnz,
dtype=dtype, device=device, index_dtype=index_dtype)
a = a.to_sparse_bsr((block_size, block_size))
else:
a = self.genSparseCSRTensor((m, m), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data # Test column-major blocks
a = torch.sparse_bsr_tensor(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, m * block_size), check_invariants=False)
b = make_tensor((m * block_size, k), dtype=dtype, device=device, noncontiguous=noncontiguous)
for (upper, unitriangular, transpose, op_out) in itertools.product([True, False], repeat=4):
run_test(a, b, upper=upper, transpose=transpose, unitriangular=unitriangular, op_out=op_out)
@skipCPUIfNoMklSparse
@unittest.skipIf(TEST_WITH_ROCM, "Only CUDA 11+ is supported")
@dtypes(torch.double)
def test_mm(self, device, dtype):
def test_shape(di, dj, dk, nnz0=None, nnz1=None):
for index_dtype in [torch.int32, torch.int64]:
alpha = random.random()
beta = random.random()
def _test_addmm(t, x, y):
# TODO: addmm doesn't support strided result for sparse inputs.
# res = beta * t + alpha * (x @ y)
res = torch.addmm(t, x, y, beta=beta, alpha=alpha)
expected = torch.addmm(t, x.to_dense(), y.to_dense(), beta=beta, alpha=alpha)
self.assertEqual(res, expected)
res = torch.addmm(t, x, y)
expected = torch.addmm(t, x.to_dense(), y.to_dense())
self.assertEqual(res, expected)
def _test_mm(x, y):
res = torch.mm(x, y)
expected = torch.mm(x.to_dense(), y.to_dense())
if x.layout is torch.strided or y.layout is torch.strided:
self.assertEqual(res.layout, torch.strided)
else:
self.assertEqual(res.layout, torch.sparse_csr)
self.assertEqual(res.to_dense(), expected)
def _test(t, x, y):
_test_addmm(t, x, y)
_test_mm(x, y)
if nnz0 is None:
nnz0 = random.randint(di * dk // 2, di * dk)
t = torch.randn(di, dj, dtype=dtype, device=device)
x = self.genSparseCSRTensor((di, dk), nnz0, device=device, dtype=dtype, index_dtype=index_dtype)
y = torch.randn(dk, dj, dtype=dtype, device=device)
_test(t, x, y)
t = torch.randn(di, dj, dtype=dtype, device=device)
x = self.genSparseCSCTensor((di, dk), nnz0, device=device, dtype=dtype, index_dtype=index_dtype)
y = torch.randn(dk, dj, dtype=dtype, device=device)
_test(t, x, y)
if nnz1 is None:
nnz1 = random.randint(dk * dj // 2, dk * dj)
t = torch.randn(di, dj, dtype=dtype, device=device)
x = torch.randn(di, dk, dtype=dtype, device=device)
y = self.genSparseCSRTensor((dk, dj), nnz1, device=device, dtype=dtype, index_dtype=index_dtype)
_test(t, x, y)
t = torch.randn(di, dj, dtype=dtype, device=device)
x = torch.randn(di, dk, dtype=dtype, device=device)
y = self.genSparseCSCTensor((dk, dj), nnz1, device=device, dtype=dtype, index_dtype=index_dtype)
_test(t, x, y)
x_shape, y_shape = x.shape, y.shape
gen_csr_csc = [self.genSparseCSRTensor, self.genSparseCSCTensor]
# Test mm({CSR, CSC}, {CSR, CSC})
for gen_x, gen_y in itertools.product(gen_csr_csc, gen_csr_csc):
x = gen_x(x_shape, nnz0, device=device, dtype=dtype, index_dtype=index_dtype)
y = gen_y(y_shape, nnz1, device=device, dtype=dtype, index_dtype=index_dtype)
_test_mm(x, y)
def test_empty_inputs(lhs_layout, rhs_layout):
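# matmul with zero-sized dimensions must still produce results whose dense
# form matches the corresponding dense matmul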
xd = torch.rand(10, 0, device=device, dtype=dtype)
yd = xd.transpose(-2, -1)
zd = torch.rand(0, 0, device=device, dtype=dtype)
xls, yls, zls = [t.to_sparse(layout=lhs_layout) for t in (xd, yd, zd)]
xrs, yrs, zrs = [t.to_sparse(layout=rhs_layout) for t in (xd, yd, zd)]
for ls, rs, ld, rd in [(xls, yrs, xd, yd), (xls, zrs, xd, zd), (zls, yrs, zd, yd), (zls, zrs, zd, zd)]:
res_sparse = ls @ rs
res_dense = ld @ rd
self.assertEqual(res_sparse.to_dense(), res_dense)
def test_orthogonal_inputs(lhs_layout, rhs_layout):
ones = torch.ones(2, 2, device=device, dtype=dtype)
zeros = torch.zeros(2, 2, device=device, dtype=dtype)
x = torch.cat((ones, zeros), -1).to_sparse(layout=lhs_layout)
y = torch.cat((zeros, ones), -2).to_sparse(layout=rhs_layout)
res = x @ y
res_expected = torch.zeros(*res.shape, device=device, dtype=dtype, layout=res.layout)
self.assertEqual(res, res_expected)
for lhs_layout, rhs_layout in itertools.product([torch.sparse_csr, torch.sparse_csc], repeat=2):
test_empty_inputs(lhs_layout, rhs_layout)
test_orthogonal_inputs(lhs_layout, rhs_layout)
for i in [2, 4]:
for j in [2, 4, 7]:
for k in [2, 3, 7]:
test_shape(i, j, k)
test_shape(4, 4, 4, 0, 0)
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
*[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_mm(self, device, dtype):
def test_shape(d1, d2, d3, nnz, transposed, index_dtype):
if transposed:
D = torch.randn(d3, d2, dtype=dtype, device=device).t_()
else:
D = torch.randn(d2, d3, dtype=dtype, device=device)
S = self.genSparseCSRTensor((d1, d2), nnz, device=device, dtype=dtype, index_dtype=index_dtype)
S_dense = S.to_dense()
self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
for index_dtype in [torch.int32, torch.int64]:
test_shape(7, 8, 9, 20, False, index_dtype)
test_shape(7, 8, 9, 20, True, index_dtype)
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
*[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_addmm(self, device, dtype):
def test_shape(m, n, p, nnz, broadcast, index_dtype, alpha_beta=None):
if alpha_beta is None:
alpha = random.random()
beta = random.random()
else:
alpha, beta = alpha_beta
if broadcast:
D1 = make_tensor((), dtype=dtype, device=device)
else:
D1 = make_tensor([n, p], dtype=dtype, device=device)
D2 = make_tensor([m, p], dtype=dtype, device=device)
S = self.genSparseCSRTensor([n, m], nnz, dtype=dtype, device=device, index_dtype=index_dtype)
S_dense = S.to_dense()
Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
self.assertEqual(Y, Y_dense)
for index_dtype in [torch.int32, torch.int64]:
test_shape(7, 8, 9, 20, False, index_dtype, None)
test_shape(7, 8, 9, 20, True, index_dtype, None)
test_shape(7, 8, 9, 20, False, index_dtype, (1, 0))
test_shape(7, 8, 9, 20, True, index_dtype, (1, 0))
test_shape(7, 8, 9, 20, False, index_dtype, (1, 1))
test_shape(7, 8, 9, 20, True, index_dtype, (1, 1))
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*floating_types_and(torch.complex64,
*[torch.bfloat16] if SM80OrLater else [],
*[torch.half] if SM53OrLater else [],
*[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
@sparse_compressed_nonblock_layouts()
@skipCUDAIf(
not _check_cusparse_spgemm_available(),
"cuSparse Generic API SpGEMM is not available"
)
def test_addmm_all_sparse_csr(self, device, dtype, layout):
M = torch.randn(10, 25, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=layout, mode="all_sparse")
# Test 0-strided
M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=layout, mode="all_sparse")
# Test beta=0, M=nan
M = torch.full((10, 25), float('nan'), device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, beta=0, layout=layout, mode="all_sparse")
# Test transpose
for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
_test_addmm_addmv(self, torch.addmm, M, m1, m2, transpose_out=t4, layout=layout, mode="all_sparse")
@onlyCPU
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@sparse_compressed_nonblock_layouts()
def test_addmm_dense_result(self, device, dtype, layout):
M = torch.randn(10, 25, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=layout, mode="dense_result")
# Test 0-strided
M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=layout, mode="dense_result")
# Test beta=0, M=nan
M = torch.full((10, 25), float('nan'), device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, beta=0, layout=layout, mode="dense_result")
# Test transpose
for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
_test_addmm_addmv(self, torch.addmm, M, m1, m2, transpose_out=t4, layout=layout, mode="dense_result")
@parametrize("k", [0, 1, 8])
@parametrize("n", [0, 1, 10])
@parametrize("m", [0, 1, 25])
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_types_and(torch.complex64,
*[torch.bfloat16] if SM80OrLater else [],
*[torch.half] if SM53OrLater else [],
*[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
@skipCUDAIf(
not _check_cusparse_spgemm_available(),
"cuSparse Generic API SpGEMM is not available"
)
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
def test_addmm_sizes_all_sparse_csr(self, device, dtype, m, n, k):
M = torch.randn(n, m, device=device).to(dtype)
m1 = torch.randn(n, k, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="all_sparse")
M = torch.randn(n, m, device=device).to(dtype).to_sparse_csr()
m1 = torch.randn(n, k + 1, device=device).to(dtype).to_sparse_csr()
m2 = torch.randn(k, m, device=device).to(dtype).to_sparse_csr()
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@skipCPUIfNoMklSparse
@dtypes(torch.float)
def test_addmm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a)
else:
return torch.addmm(a, a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a.unsqueeze(0))
else:
return torch.addmm(a, a, a.unsqueeze(0))
def test3(*, is_sparse):
# the first input needs to be 1D or 2D
a = make_tensor((3, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a.unsqueeze(0), a_sparse, a)
else:
return torch.addmm(a.unsqueeze(0), a, a)
for test in (test1, test2, test3):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@skipCPUIfNoMklSparse
@dtypes(torch.float)
def test_mm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a)
else:
return torch.mm(a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a.unsqueeze(0))
else:
return torch.mm(a, a.unsqueeze(0))
for test in (test1, test2):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@dtypes(torch.float, torch.double)
def test_add(self, device, dtype):
def _test_spadd_shape(nnz, shape):
# sparse.to_dense() uses torch.add internally, so if torch.add is wrong
# the dense reference will be wrong too and this test would still pass;
# a separate test checks the correctness of the .to_dense() call
x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
y = torch.randn(*shape, dtype=dtype, device=device)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = torch.randn(*s, dtype=torch.double, device=device)
y.transpose_(0, len(s) - 1)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
ns = [2, 5]
batch_shapes = [(), (2,), (2, 3)]
for b, m, n in itertools.product(batch_shapes, ns, ns):
_test_spadd_shape(0, (*b, m, n))
_test_spadd_shape(m * n // 2, (*b, m, n))
_test_spadd_shape(m * n, (*b, m, n))
@dtypes(torch.float, torch.double)
def test_mul(self, device, dtype):
# TODO: This whole test should be migrated to OpInfos
def _test_spadd_shape(fn, nnz, shape):
x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
y = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
# Forward comparison
res_sparse_sparse = fn(y, x)
res_dense_sparse = fn(y.to_dense(), x)
res_sparse_dense = fn(y, x.to_dense())
expected = fn(y.to_dense(), x.to_dense()).to_sparse_csr()
self.assertEqual(res_sparse_sparse, expected)
# TODO: While result of mul(dense, csr) is csr, it is not fully compressed.
# That means it may contain materialized zeros, since the dense argument
# is converted according to the sparsity pattern of csr. In the future
# we might require the result to be fully compressed.
self.assertEqual(res_dense_sparse.to_dense(), expected.to_dense())
self.assertEqual(res_sparse_dense.to_dense(), expected.to_dense())
# Grad comparison
x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
y = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
z = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
# csr * csr -> csr with csr, csr gradients
x_a = x.clone().requires_grad_()
y_a = y.clone().requires_grad_()
fn(y_a, x_a).backward(z)
x_dense_a = x.to_dense().requires_grad_()
y_dense_a = y.to_dense().requires_grad_()
fn(y_dense_a, x_dense_a).backward(z.to_dense())
self.assertEqual(x_a.grad.layout, torch.sparse_csr)
self.assertEqual(y_a.grad.layout, torch.sparse_csr)
self.assertEqual(x_a.grad.to_dense(), x_dense_a.grad)
self.assertEqual(y_a.grad.to_dense(), y_dense_a.grad)
# TODO: Currently strided Tensors cannot have csr gradients
# dense * csr -> csr with csr, dense gradients
x_a = x.clone().requires_grad_()
y_a = y.to_dense().clone().requires_grad_()
err_msg = "Function MulBackward0 returned an invalid gradient at index 0 - expected layout Strided but got SparseCsr"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(y_a, x_a).backward(z)
# csr * dense -> csr with dense, csr gradients
x_a = x.to_dense().clone().requires_grad_()
y_a = y.clone().requires_grad_()
err_msg = "Function MulBackward0 returned an invalid gradient at index 1 - expected layout Strided but got SparseCsr"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(y_a, x_a).backward(z)
_test_spadd_shape(torch.mul, 100, [100, 100])
_test_spadd_shape(torch.mul, 0, [100, 100])
_test_spadd_shape(torch.mul, 100, [100, 1])
_test_spadd_shape(torch.mul, 100, [1, 100])
# TODO: enable hybrid once to_dense supports it
@parametrize('enable_hybrid', [False])
@all_sparse_compressed_layouts()
@dtypes(*all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half))
def test_mul_scalar(self, layout, device, dtype, enable_hybrid):
for sparse in self.generate_simple_inputs(
layout, device=device, dtype=dtype, index_dtype=torch.int32, enable_hybrid=enable_hybrid):
for scalar_dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half):
# ComplexHalf is experimental
if dtype is torch.half and scalar_dtype.is_complex:
continue
scalar_t = torch.tensor(2, dtype=scalar_dtype)
for scalar in (scalar_t, scalar_t.item()):
res_out = sparse.mul(scalar)
self.assertEqual(res_out, scalar * sparse)
res_dense_out = sparse.to_dense().mul(scalar)
# BUG: the dispatcher ignores mul.Scalar(Tensor, Scalar).
# This issue is circumvented in the mul(Tensor, Tensor) kernel.
self.assertEqual(res_out, res_dense_out)
if dtype == torch.result_type(sparse, scalar):
res_in_dense = sparse.to_dense().mul_(scalar)
res_in = sparse.clone().mul_(scalar)
self.assertEqual(res_in, res_in_dense)
self.assertEqual(res_out, res_in)
@skipCPUIfNoMklSparse
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sparse_add(self, device, dtype):
def run_test(m, n, index_dtype):
alpha = random.random()
nnz1 = random.randint(0, m * n)
nnz2 = random.randint(0, m * n)
nnz3 = random.randint(0, m * n)
if TEST_WITH_ROCM:
# ROCm fails when nnz = 0
nnz1, nnz2, nnz3 = max(1, nnz1), max(1, nnz2), max(1, nnz3)
S1 = self.genSparseCSRTensor([m, n], nnz1, dtype=dtype, device=device, index_dtype=index_dtype)
S2 = self.genSparseCSRTensor([m, n], nnz2, dtype=dtype, device=device, index_dtype=index_dtype)
S3 = self.genSparseCSRTensor([m, n], nnz3, dtype=dtype, device=device, index_dtype=index_dtype)
expected = torch.add(S1.to_dense(), S2.to_dense(), alpha=alpha)
actual = torch.add(S1, S2, alpha=alpha, out=S3)
self.assertEqual(actual.to_dense(), expected)
self.assertEqual(S3.to_dense(), expected)
for index_dtype in [torch.int32, torch.int64]:
for m, n in itertools.product([3, 5], [3, 5]):
run_test(m, n, index_dtype)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sparse_add_errors(self, device, dtype):
def run_test(index_type):
a = self.genSparseCSRTensor((2, 2), 3, dtype=dtype, device=device, index_dtype=index_dtype)
b = self.genSparseCSRTensor((2, 1), 2, dtype=dtype, device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "Expected input tensors to have the same shape"):
torch.add(a, b)
for index_dtype in [torch.int32, torch.int64]:
run_test(index_dtype)
@skipCPUIfNoMklSparse
@skipCUDAIf(
not _check_cusparse_triangular_solve_available(),
"cuSparse Generic API SpSV is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_sparse_triangular_solve(self, device, dtype):
def run_test(n, k, upper, unitriangular, transpose, zero):
if not unitriangular:
triangle_function = torch.triu if upper else torch.tril
else:
# Make sure diagonal elements are not materialized.
# This is to exercise `unitriangular=True` not relying on
# explicit presence of these indices.
if upper:
def remove_diagonal(t):
return t.triu(-1)
else:
def remove_diagonal(t):
return t.tril(-1)
triangle_function = remove_diagonal
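# With zero=True the coefficient matrix is all zeros, so its sparse form has
# nnz == 0; this exercises the NaN-solution path checked below.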
make_A = torch.zeros if zero else make_tensor
A = make_A((n, n), dtype=dtype, device=device)
A = triangle_function(A)
A_sparse = A.to_sparse_csr()
B = make_tensor((n, k), dtype=dtype, device=device)
expected = torch.triangular_solve(B, A, upper=upper, unitriangular=unitriangular, transpose=transpose)
expected_X = expected.solution
actual = torch.triangular_solve(B, A_sparse, upper=upper, unitriangular=unitriangular, transpose=transpose)
actual_X = actual.solution
actual_A_clone = actual.cloned_coefficient
self.assertTrue(actual_A_clone.numel() == 0)
if A_sparse._nnz() == 0:
self.assertTrue(actual_X.isnan().all())
return
self.assertEqual(actual_X, expected_X)
# test out with C contiguous strides
out = torch.empty_strided((n, k), (k, 1), dtype=dtype, device=device)
torch.triangular_solve(
B, A_sparse,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, expected_X)
# test out with F contiguous strides
out = torch.empty_strided((n, k), (1, n), dtype=dtype, device=device)
torch.triangular_solve(
B, A_sparse,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, expected_X)
self.assertEqual(out.stride(), (1, n))
# test out with discontiguous strides
out = torch.empty_strided((2 * n, k), (1, 2 * n), dtype=dtype, device=device)[::2]
if n > 0 and k > 0:
self.assertFalse(out.is_contiguous())
self.assertFalse(out.t().is_contiguous())
before_stride = out.stride()
torch.triangular_solve(
B, A_sparse,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, expected_X)
self.assertEqual(out.stride(), before_stride)
ks = [0, 1, 3]
ns = [5, 3, 0]
for (k, n), (upper, unitriangular, transpose, zero) in itertools.product(itertools.product(ks, ns),
itertools.product([True, False], repeat=4)):
run_test(n, k, upper, unitriangular, transpose, zero)
@skipCUDAIfRocm
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_sampled_addmm(self, device, dtype):
def run_test(c, a, b, op_a, op_b, *, alpha=None, beta=None):
if dtype.is_complex:
alpha = random.random() + 0.3j if alpha is None else alpha
beta = random.random() + 0.6j if beta is None else beta
else:
alpha = random.random() if alpha is None else alpha
beta = random.random() if beta is None else beta
if op_a and a.shape == b.shape:
a = a.mH
if op_b and a.shape == b.shape:
b = b.mH
actual = torch.sparse.sampled_addmm(c, a, b, alpha=alpha, beta=beta)
out = torch.sparse_csr_tensor(
*map(torch.clone, (actual.crow_indices(), actual.col_indices())),
torch.empty_like(actual.values()),
size=actual.shape
)
torch.sparse.sampled_addmm(c, a, b, alpha=alpha, beta=beta, out=out)
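# spy_c shares c's sparsity pattern but stores ones, so multiplying the dense
# product by it masks the result to the sampled entries of c.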
spy_c = torch.sparse_csr_tensor(c.crow_indices(), c.col_indices(), torch.ones_like(c.values()), size=c.shape)
expected = alpha * (a @ b) * spy_c.to_dense() + beta * c.to_dense()
self.assertEqual(actual.to_dense(), out.to_dense())
self.assertEqual(actual.to_dense(), expected)
mnk = list(itertools.product([2, 5], repeat=3))
# Add a test case for size 0 a and b tensors
mnk = mnk + [(5, 5, 0)]
batch_shapes = [(), (2,), (2, 3)]
tf = [True, False]
for index_dtype in [torch.int32, torch.int64]:
for (m, n, k), b, noncontiguous, bcast_c in itertools.product(mnk, batch_shapes, tf, tf):
if bcast_c and len(b) == 0:
continue
nnz = random.randint(0, m * n)
c_batch = () if bcast_c else b
c = self.genSparseCSRTensor((*c_batch, m, n), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a = make_tensor((*b, m, k), dtype=dtype, device=device, noncontiguous=noncontiguous)
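# note: up to this point `b` holds the batch shape; below it is rebound to
# the dense right-hand-side tensor of shape (*batch, k, n)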
b = make_tensor((*b, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
for op_a, op_b in itertools.product([True, False], repeat=2):
run_test(c, a, b, op_a, op_b)
@skipCUDAIfRocm
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sampled_addmm_autograd(self, device, dtype):
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
samples = list(sample_inputs_sparse_sampled_addmm(None, device, dtype, requires_grad=True))
for sample, dense_covector in zip(samples, [True, False]):
c = sample.input
a = sample.args[0]
b = sample.args[1]
# Compute sparse result
output = torch.sparse.sampled_addmm(c, a, b, **sample.kwargs)
covector = torch.randn_like(output).to_dense() if dense_covector else torch.randn_like(output)
output.backward(covector)
# Compute dense result and compare with sparse result
c1, a1, b1 = map(lambda x: x.detach().to_dense().requires_grad_(True), [c, a, b])
dense_output = sample.kwargs['alpha'] * (a1 @ b1) * torch.ones_like(c).to_dense() + sample.kwargs['beta'] * c1
self.assertEqual(output, dense_output)
dense_covector = covector.to_dense()
dense_output.backward(dense_covector)
self.assertEqual(c.grad, c1.grad)
self.assertEqual(a.grad, a1.grad)
self.assertEqual(b.grad, b1.grad)
@skipCUDAIfRocm
@onlyCUDA
@skipCUDAIf(True, "Causes CUDA memory exception, see https://github.com/pytorch/pytorch/issues/72177")
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_sampled_addmm_zero_sized(self, device, dtype):
def run_test(c, a, b):
actual = torch.sparse.sampled_addmm(c, a, b)
self.assertEqual(actual.shape, c.shape)
for m, n, k in itertools.product([0, 5], repeat=3):
c = torch.empty(m, n, dtype=dtype, device=device, layout=torch.sparse_csr)
a = make_tensor((m, k), dtype=dtype, device=device)
b = make_tensor((k, n), dtype=dtype, device=device)
run_test(c, a, b)
@onlyCUDA
@skipCUDAIf(
not (TEST_WITH_ROCM or _check_cusparse_sddmm_available()),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sampled_addmm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse sampled versions
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
a_sparse = a.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"cannot be multiplied"):
torch.sparse.sampled_addmm(a_sparse, a, a)
# mat1 must be a matrix
with self.assertRaisesRegex(RuntimeError, r"Expected mat1 to be a matrix"):
torch.sparse.sampled_addmm(a_sparse, a[..., 0, :], a)
# mat2 must be a matrix
with self.assertRaisesRegex(RuntimeError, r"Expected mat2 to be a matrix"):
torch.sparse.sampled_addmm(a_sparse, a, a[..., 0, :])
a = make_tensor((2, 2), dtype=dtype, device=device)
b = make_tensor((3, 3), dtype=dtype, device=device)
b_sparse = b.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"self.shape\[-2\] must match mat1.shape\[-2\]"):
torch.sparse.sampled_addmm(b_sparse, a, a)
b = make_tensor((2, 3), dtype=dtype, device=device)
b_sparse = b.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"self.shape\[-1\] must match mat2.shape\[-1\]"):
torch.sparse.sampled_addmm(b_sparse, a, a)
a = make_tensor((2, 2), dtype=dtype, device=device)
a_sparse = a.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"Expected mat1 to have strided layout"):
torch.sparse.sampled_addmm(a_sparse, a_sparse, a_sparse)
with self.assertRaisesRegex(RuntimeError, r"Expected mat2 to have strided layout"):
torch.sparse.sampled_addmm(a_sparse, a, a_sparse)
@onlyCPU
@dtypes(torch.float32, torch.float64, torch.bfloat16)
def test_sparse_mm_reduce_sum(self, device, dtype):
def run_test(m, n, k, nnz, train):
sparse = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=torch.int64)
dense = sparse.to_dense()
mat = torch.randn(k, n, dtype=dtype)
ref_mat = mat.clone()
if train:
sparse.requires_grad_()
mat.requires_grad_()
dense.requires_grad_()
ref_mat.requires_grad_()
ref_out = torch.mm(dense, ref_mat)
out = torch.sparse.mm(sparse, mat, 'sum')
self.assertEqual(out, ref_out)
if train:
ref_out.sum().backward()
out.sum().backward()
grad_input = sparse.grad
ref_grad_input = dense.grad
grad_mat = mat.grad
ref_grad_mat = ref_mat.grad
self.assertEqual(grad_input.to_dense(), ref_grad_input)
self.assertEqual(grad_mat, ref_grad_mat)
run_test(4, 5, 4, 10, False)
run_test(4, 4, 4, 16, True)
@onlyCPU
@dtypes(torch.float32, torch.float64, torch.bfloat16)
def test_sparse_mm_reduce(self, device, dtype):
def run_test(m, n, k, nnz, reduce_type, index_dtype, train):
csr = self.genSparseCSRTensor((m, n), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
mat = torch.randn(n, k, dtype=dtype)
ref_mat = mat.clone()
ref_values = csr.values().clone()
out_int32 = index_dtype == torch.int32
coo_indices = torch._convert_indices_from_csr_to_coo(
csr.crow_indices(),
csr.col_indices(),
out_int32=out_int32)
row, col = coo_indices[0], coo_indices[1]
def ref(row, col, val, mat):
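# dense reference: gather rows of mat by column index, scale them by the
# stored values, then scatter-reduce into the output rows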
out = torch.zeros([m, k], dtype=dtype)
weight = mat.index_select(0, col)
src = weight.mul(val.view(-1, 1))
index = row.view(-1, 1).expand_as(weight)
index = index.to(dtype=torch.int64)
# scatter_reduce expects index to be int64
out.scatter_reduce_(0, index, src, reduce=reduce_type, include_self=False)
return out
if train:
csr.requires_grad_()
mat.requires_grad_()
ref_values.requires_grad_()
ref_mat.requires_grad_()
ref_out = ref(row, col, ref_values, ref_mat)
out = torch.sparse.mm(csr, mat, reduce_type)
self.assertEqual(out, ref_out)
if train and dtype is not torch.bfloat16:
ref_out.sum().backward()
out.sum().backward()
grad_values = csr.grad.values()
grad_weight = mat.grad
ref_grad_values = ref_values.grad
ref_grad_weight = ref_mat.grad
self.assertEqual(grad_values, ref_grad_values)
self.assertEqual(grad_weight, ref_grad_weight)
for train in [False, True]:
for index_dtype in [torch.int32, torch.int64]:
for reduce_type in ["sum", "mean", "amax", "amin"]:
# by setting nnz < M, create empty rows
run_test(3, 4, 11, 1, reduce_type, index_dtype, train)
run_test(3, 4, 11, 6, reduce_type, index_dtype, train)
run_test(3, 4, 11, 12, reduce_type, index_dtype, train)
# the kernel blocks over K with 4x the vector length,
# so also test the case where K > 4x the vector length
run_test(4, 7, 33, 13, reduce_type, index_dtype, train)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_coo_csr_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
coo_sparse = dense.to_sparse()
csr_sparse = coo_sparse.to_sparse_csr()
self.assertEqual(csr_sparse.to_dense(), dense)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_csr_coo_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
csr_sparse = dense.to_sparse_csr()
coo_sparse = csr_sparse.to_sparse()
self.assertEqual(coo_sparse.to_dense(), dense)
# Currently, there is no rule in PyTorch for filling zeros in the outputs
# of operations on Sparse CSR tensors. Hence only operators with a 0->0
# correspondence are supported, e.g. sin(0) = 0 and tan(0) = 0, whereas
# cos(0) = 1 and hence cos is not supported.
# Note: here, we do this test only for unary operators
@ops(sparse_csr_unary_ufuncs)
def test_zero_to_zero_correspondence_unary(self, device, dtype, op):
zero = torch.zeros((1, 2), dtype=dtype, device=device)
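# a 1x2 CSR tensor that stores a single explicit zero, used to check that the
# op maps stored zeros to zeros and preserves the sparsity pattern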
tensor_explicit_zeros = torch.sparse_csr_tensor([0, 1], [1], [0], dtype=dtype, device=device)
output_zero = op(zero)
expected_zero = zero.to(output_zero.dtype)
output_explicit_zeros = op(tensor_explicit_zeros).to_dense()
expected_explicit_zeros = tensor_explicit_zeros.to_dense().to(output_explicit_zeros.dtype)
for (output, expected) in [
(output_zero, expected_zero),
(output_explicit_zeros, expected_explicit_zeros)
]:
self.assertEqual(output, expected, f"This operator ({op.name}) should not be supported for "
"Sparse CSR as it breaks 0->0 correspondence.")
for inp in [zero.to_sparse_csr(), tensor_explicit_zeros]:
self.assertEqual(op(inp).values().numel(), inp.values().numel(),
f"{op.name} fails to preserve sparsity pattern.")
@ops(sparse_csr_unary_ufuncs)
def test_sparse_csr_unary_out(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
if not op.supports_out:
self.skipTest("Skipped! Out not supported")
for sample in samples:
assert torch.is_tensor(sample.input)
# Sparse CSR only supports 2D tensors as inputs
# Fail early to prevent silent success with this test
if sample.input.ndim != 2:
raise ValueError("Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
sample.input = sample.input.to_sparse_csr()
expect = op(sample.input, *sample.args, **sample.kwargs)
out = self.genSparseCSRTensor(sample.input.size(), sample.input._nnz(),
device=sample.input.device, dtype=expect.dtype,
index_dtype=sample.input.crow_indices().dtype)
op(sample.input, *sample.args, **sample.kwargs, out=out)
self.assertEqual(out, expect)
@ops(sparse_csr_unary_ufuncs)
def test_sparse_csr_unary_inplace(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
if op.inplace_variant is None:
self.skipTest("Skipped! Inplace variant not supported!")
for sample in samples:
assert torch.is_tensor(sample.input)
# Sparse CSR only supports 2D tensors as inputs
# Fail early to prevent silent success with this test
if sample.input.ndim != 2:
raise ValueError("Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
sample.input = sample.input.to_sparse_csr()
expect = op(sample.input, *sample.args, **sample.kwargs)
if not torch.can_cast(expect.dtype, dtype):
with self.assertRaisesRegex(RuntimeError, "result type"):
op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
continue
if sample.input.is_complex() and op.name == "abs":
with self.assertRaisesRegex(RuntimeError, "not supported"):
op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
continue
actual = op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
self.assertIs(actual, sample.input)
self.assertEqual(actual, expect)
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@ops(sparse_csr_unary_ufuncs, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, torch.cdouble])
def test_autograd_sparse_csr_unary(self, device, dtype, op):
if op.name not in UNARY_EWISE_CSR_ALLOW_AUTOGRAD:
self.skipTest(f"Skipped! Unary op {op.name} not supported with CSR input and autograd")
samples = list(op.sample_inputs(device, dtype))
# Fail early to prevent silent success with this test
ndims_equals_2d = (s.input.ndim == 2 for s in samples)
if not any(ndims_equals_2d):
raise ValueError("Expected at least one 2D tensor in samples.")
for sample in samples:
# We must skip samples of low dimensionality; we can't convert them to sparse compressed layouts
if sample.input.ndim < 2:
continue
sparse_input = sample.input.to_sparse_csr().requires_grad_(True)
def fn(input):
output = op.gradcheck_wrapper(op.get_op(), input, *sample.args, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
# Compute sparse result
output = fn(sparse_input)
covector = torch.randn_like(output)
output.backward(covector)
self.assertTrue(torch.is_tensor(sparse_input.grad))
self.assertTrue(sparse_input.grad.is_sparse_csr)
# Compute dense result and compare with sparse result
dense_input = sparse_input.detach().to_dense().requires_grad_(True)
dense_output = fn(dense_input)
dense_covector = covector.to_dense()
dense_output.backward(dense_covector)
self.assertEqual(sparse_input.grad, dense_input.grad)
@skipCUDAIfRocm
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float64)
def test_autograd_dense_output_addmm(self, device, dtype):
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
samples = list(sample_inputs_addmm(None, device, dtype, requires_grad=True))
# Fail early to prevent silent success with this test
ndims_equals_2d = (s.args[0].ndim == 2 for s in samples)
if not any(ndims_equals_2d):
raise ValueError("Expected at least one 2D tensor in samples to convert to sparse.")
for sample in samples:
a = sample.args[0].relu().to_sparse_csr()
# This path tests the autograd path wrt dense inputs
for addmm in [torch.addmm, torch.sparse.addmm]:
def fn(c, b):
output = addmm(c, a, b, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
self.assertTrue(torch.autograd.gradcheck(fn, [sample.input, sample.args[1]], fast_mode=True))
# noncontiguous
c = make_tensor(sample.input.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
b = make_tensor(sample.args[1].shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
self.assertTrue(torch.autograd.gradcheck(fn, [c, b], fast_mode=True))
# Now test the autograd path wrt sparse inputs
for reverse in [True, False]:
c, b = sample.input, sample.args[1]
if reverse and a.shape != b.shape:
continue
def fn(a):
inputs = (c, b, a) if reverse else (c, a, b)
output = addmm(*inputs, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
# gradcheck doesn't work for sparse CSR yet, compare against dense path
# Compute sparse result
a = a.detach().requires_grad_(True)
output = fn(a)
covector = torch.randn_like(output)
output.backward(covector)
self.assertTrue(torch.is_tensor(a.grad))
if addmm == torch.sparse.addmm:
self.assertTrue(a.grad.is_sparse_csr)
else:
self.assertTrue(a.grad.layout == torch.strided)
# Compute dense result and compare with sparse result
dense_a = a.detach().to_dense().requires_grad_(True)
dense_output = fn(dense_a)
self.assertEqual(output, dense_output)
dense_covector = covector.to_dense()
dense_output.backward(dense_covector)
if addmm == torch.sparse.addmm:
self.assertEqual(a.grad, dense_a.grad.sparse_mask(a))
else:
self.assertEqual(a.grad, dense_a.grad)
@skipCUDAIfRocm
@skipCPUIfNoMklSparse
@dtypes(torch.float64)
def test_autograd_dense_output_addmv(self, device, dtype):
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
samples = list(sample_inputs_addmv(None, device, dtype, requires_grad=True))
# Fail early to prevent silent success with this test
ndims_equals_2d = (s.args[0].ndim == 2 for s in samples)
if not any(ndims_equals_2d):
raise ValueError("Expected at least one 2D tensor in samples to convert to sparse.")
for sample in samples:
# TODO: Remove detach once we have autograd support for CSR input
a = sample.args[0].to_sparse_csr().detach()
def fn(c, b):
output = torch.addmv(c, a, b, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
self.assertTrue(torch.autograd.gradcheck(fn, [sample.input, sample.args[1]], fast_mode=True))
# noncontiguous
c = make_tensor(sample.input.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
b = make_tensor(sample.args[1].shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
self.assertTrue(torch.autograd.gradcheck(fn, [c, b], fast_mode=True))
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@ops(binary_ops_with_dense_output, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, ])
def test_autograd_dense_output(self, device, dtype, op):
if op.name == "mv" and no_mkl_sparse and self.device_type == 'cpu':
self.skipTest("MKL Sparse is not available")
if op.name == "mv" and TEST_WITH_ROCM and self.device_type == 'cuda':
# mv currently work only on CUDA
self.skipTest("ROCm is not supported")
samples = list(op.sample_inputs(device, dtype, requires_grad=True))
# Fail early to prevent silent success with this test
ndims_equals_2d = (s.input.ndim == 2 for s in samples)
if not any(ndims_equals_2d):
raise ValueError("Expected at least one 2D tensor in samples.")
# Here we assume that the signature is op(sparse_input, dense_input) -> dense_output
for sample in samples:
# TODO: Remove detach once we have autograd support for CSR input
sparse_input = sample.input.to_sparse_csr().detach()
def fn(*args):
output = op.gradcheck_wrapper(op.get_op(), sparse_input, *args, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
self.assertTrue(torch.autograd.gradcheck(fn, sample.args, fast_mode=True))
# noncontiguous
args = [make_tensor(a.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True) for a in sample.args]
self.assertTrue(torch.autograd.gradcheck(fn, args, fast_mode=True))
@dtypes(*all_types_and_complex())
def test_direct_coo_csr_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
coo_sparse = dense.to_sparse_coo()
self.assertEqual(coo_sparse.to_sparse_csr().to_sparse_coo(), coo_sparse)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sum(self, device, dtype):
def run_test(shape, nnz, index_type):
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
self.assertEqual(a.sum(), a.values().sum())
if dtype in floating_types():
a.requires_grad_(True)
a.sum().backward()
self.assertEqual(a.grad, torch.ones(shape, dtype=dtype, device=device))
for shape, index_dtype in itertools.product(
[(10, 5), (10, 10)],
[torch.int32, torch.int64]):
run_test(shape, 0, index_dtype)
run_test(shape, max(shape), index_dtype)
run_test(shape, shape[0] * shape[1], index_dtype)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@all_sparse_compressed_layouts()
def test_transpose(self, device, dtype, layout):
def _check_transpose_view(subject, transpose):
self.assertTrue(transpose.values()._is_view())
self.assertTrue(transpose._is_view())
self.assertTrue(transpose._base is subject)
def _check_layout_invariants(transpose):
self.assertEqual(transpose.device, torch.device(device))
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[transpose.layout]
compressed_indices, plain_indices = compressed_indices_mth(transpose), plain_indices_mth(transpose)
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, transpose.values(),
transpose.shape, transpose.layout)
def check_good_transpose(subject, subject_dense, dim0, dim1, expected_layout):
transpose = subject.transpose(dim0, dim1)
# correct layout
self.assertEqual(transpose.layout, expected_layout)
# transpose must return a view
_check_transpose_view(subject, transpose)
# result uses unsafe construction, so we check invariants
_check_layout_invariants(transpose)
self.assertEqual(transpose.to_dense(), subject_dense.transpose(dim0, dim1))
round_trip = transpose.transpose(dim0, dim1)
self.assertEqual(round_trip.layout, subject.layout)
# transpose must return a view
_check_transpose_view(subject, round_trip)
# result uses unsafe construction, so we check invariants
_check_layout_invariants(round_trip)
self.assertEqual(round_trip.to_dense(), subject_dense)
def check_same_dim_transpose(subject, subject_dense, dim):
transpose = subject.transpose(dim, dim)
# correct layout
self.assertEqual(transpose.layout, subject.layout)
# transpose must return a view
_check_transpose_view(subject, transpose)
# result uses unsafe construction, so we check invariants
_check_layout_invariants(transpose)
self.assertEqual(transpose.to_dense(), subject_dense)
def check_dim_type_mismatch_throws(subject, name0, dim0, name1, dim1):
mismatch_name = f"{dim0}\\({name0}\\) and {dim1}\\({name1}\\)"
err = r"transpose\(\): can only transpose dimensions of the same type \(Batch, Sparse, Dense\), got " + mismatch_name
with self.assertRaisesRegex(RuntimeError, err):
subject.transpose(dim0, dim1)
def run_test(shape, nnz, index_type, n_dense, blocksize=()):
subject = self.genSparseCompressedTensor(shape,
nnz,
layout=layout,
device=device,
index_dtype=index_type,
blocksize=blocksize,
dense_dims=n_dense,
dtype=dtype)
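# shape is laid out as (batch..., sparse, sparse, dense...); sparse1/sparse0
# index the two sparse dims, dense0/dense1 the first dense dims (if any),
# and batch0/batch1 pick batch dims (if any)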
sparse0 = len(shape) - n_dense - 1
sparse1 = sparse0 - 1
dense0 = sparse0 + 1 if n_dense > 0 else None
dense1 = dense0 + 1 if n_dense > 1 else None
n_batch = len(shape) - n_dense - 2
batch0 = sparse1 - 1 if n_batch > 0 else None
batch1 = 0 if n_batch > 1 else None
sparse_dims = (sparse0, sparse1)
dense_dims = (dense0, dense1)
batch_dims = (batch0, batch1)
named0 = [(name, d[0]) for name, d in zip(["Batch", "Sparse", "Dense"], (batch_dims, sparse_dims, dense_dims))]
named1 = [(name, d[1]) for name, d in zip(["Batch", "Sparse", "Dense"], (batch_dims, sparse_dims, dense_dims))]
flipped_layout = {
torch.sparse_csr: torch.sparse_csc,
torch.sparse_csc: torch.sparse_csr,
torch.sparse_bsr: torch.sparse_bsc,
torch.sparse_bsc: torch.sparse_bsr
}[layout]
if n_dense > 0:
# expect all transpose to throw
for (name0, dim0), (name1, dim1) in itertools.product(named0, named1):
msg = r"transpose\(\): hybrid sparse compressed tensors with dense dimensions are not supported"
if (dim0 is not None) and (dim1 is not None):
with self.assertRaisesRegex(RuntimeError, msg):
subject.transpose(dim0, dim1)
else:
subject_dense = subject.to_dense()
for (name0, dim0), (name1, dim1) in itertools.product(named0, named1):
if dim0 is not None:
check_same_dim_transpose(subject, subject_dense, dim0)
if dim1 is not None:
if name0 == name1:
expected_layout = flipped_layout if name0 == "Sparse" else layout
check_good_transpose(subject, subject_dense, dim0, dim1, expected_layout)
else:
check_dim_type_mismatch_throws(subject, name0, dim0, name1, dim1)
# batch/sparse, sparse/dense only and full hybrid cases
shape_ndense = list(itertools.product([(2, 4, 6, 2), (10, 6, 4, 2), (2, 4, 4, 2, 6)], [0, 1, 2]))
# sparse only cases
shape_ndense += [[(4, 8), 0], [(2, 2), 0], [(8, 4), 0]]
for (shape, n_dense), index_dtype in itertools.product(shape_ndense, [torch.int32, torch.int64]):
n_batch = len(shape) - n_dense - 2
sparse_shape = shape[n_batch: n_batch + 2]
if layout in (torch.sparse_bsr, torch.sparse_bsc):
# for blocked layouts, all combinations of 2 and 1 should be valid block sizes
run_test(shape, 0, index_dtype, n_dense, blocksize=(2, 2))
run_test(shape, max(sparse_shape), index_dtype, n_dense, blocksize=(2, 2))
run_test(shape, sparse_shape[0] * sparse_shape[1], index_dtype, n_dense, blocksize=(2, 2))
# repeat the realistic sparsity case with varied block sizes
run_test(shape, max(sparse_shape), index_dtype, n_dense, blocksize=(2, 1))
run_test(shape, max(sparse_shape), index_dtype, n_dense, blocksize=(1, 2))
run_test(shape, max(sparse_shape), index_dtype, n_dense, blocksize=(1, 1))
else:
run_test(shape, 0, index_dtype, n_dense)
run_test(shape, max(sparse_shape), index_dtype, n_dense)
run_test(shape, sparse_shape[0] * sparse_shape[1], index_dtype, n_dense)
# TODO: This is a stopgap for a rigorous extension of our autograd tests
# to test the functionality of detach
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_exercise_detach(self, device, dtype):
shape = (3, 3)
nnz = 4
for index_dtype in [torch.int32, torch.int64]:
inp = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
detached_inp = inp.detach()
self.assertEqual(inp, detached_inp)
def _construct_sp_matrix(self, tensor, layout, blocksize=(2, 2)):
if tensor.layout in [torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.strided]:
tensor = tensor.to_dense()
else:
raise NotImplementedError(repr(tensor))
if layout is torch.sparse_csr:
return sp.csr_matrix(tensor.cpu().numpy())
if layout is torch.sparse_csc:
return sp.csc_matrix(tensor.cpu().numpy())
if layout is torch.sparse_bsr:
return sp.bsr_matrix(tensor.cpu().numpy(), blocksize=blocksize).sorted_indices()
# SciPy has no native BSC format, so BSC is not handled here.
raise NotImplementedError(repr(tensor))
@skipMeta
@all_sparse_compressed_layouts('to_layout')
@all_sparse_compressed_layouts('from_layout')
def test_compressed_layout_conversions_coverage(self, device, from_layout, to_layout):
"""This test performs a smoke test for covered conversion and verifies
that an exception is thrown for unsupported conversions.
TODO: This test covers a subset of
TestSparseAny.test_to_sparse tests and can be
eliminated. Keeping the test until the new
`Tensor.to_sparse(*, layout, blocksize)` has landed.
"""
allowed_pairwise_layouts_sets = {
frozenset({torch.sparse_csc}),
frozenset({torch.sparse_csr}),
frozenset({torch.sparse_csc, torch.sparse_csr}),
frozenset({torch.sparse_csc, torch.sparse_bsc}),
frozenset({torch.sparse_csc, torch.sparse_bsr}),
frozenset({torch.sparse_csr, torch.sparse_bsc}),
frozenset({torch.sparse_csr, torch.sparse_bsr}),
frozenset({torch.sparse_bsc}),
frozenset({torch.sparse_bsr}),
frozenset({torch.sparse_bsc, torch.sparse_bsr}),
}
block_layouts = (torch.sparse_bsr, torch.sparse_bsc)
def _to_from_layout(layout_a, layout_b, a):
expect_error = True
if {layout_a, layout_b} in allowed_pairwise_layouts_sets:
expect_error = False
# BSR -> CSR is not yet supported
if (layout_a, layout_b) == (torch.sparse_bsr, torch.sparse_csr):
expect_error = True
# BSR -> CSC is not yet supported
if (layout_a, layout_b) == (torch.sparse_bsr, torch.sparse_csc):
expect_error = True
# BSC -> CSR is not yet supported
if (layout_a, layout_b) == (torch.sparse_bsc, torch.sparse_csr):
expect_error = True
# BSC -> CSC is not yet supported
if (layout_a, layout_b) == (torch.sparse_bsc, torch.sparse_csc):
expect_error = True
# CSR -> BSR only works for non-batched inputs
if (layout_a, layout_b) == (torch.sparse_csr, torch.sparse_bsr):
if a.dim() > 2:
expect_error = True
# CSR -> BSC only works for non-batched inputs
if (layout_a, layout_b) == (torch.sparse_csr, torch.sparse_bsc):
if a.dim() > 2:
expect_error = True
# CSC -> BSR only works for non-batched inputs
if (layout_a, layout_b) == (torch.sparse_csc, torch.sparse_bsr):
if a.dim() > 2:
expect_error = True
# CSC -> BSC only works for non-batched inputs
if (layout_a, layout_b) == (torch.sparse_csc, torch.sparse_bsc):
if a.dim() > 2:
expect_error = True
blocksize_a = (1, 1) if layout_a in {torch.sparse_bsr, torch.sparse_bsc} else None
blocksize_b = (1, 1) if layout_b in {torch.sparse_bsr, torch.sparse_bsc} else None
b = a.to_sparse(layout=layout_a, blocksize=blocksize_a)
if expect_error:
with self.assertRaises(RuntimeError):
b.to_sparse(layout=layout_b, blocksize=blocksize_b)
else:
c = b.to_sparse(layout=layout_b, blocksize=blocksize_b)
self.assertEqual(a.to_dense(), c.to_dense())
# change of blocksize upon conversion is not yet supported.
if b.layout in block_layouts:
for block_layout in block_layouts:
with self.assertRaisesRegex(RuntimeError, "conversion from.*to.*is not implemented"):
b.to_sparse(layout=block_layout, blocksize=(3, 3))
batch_dims = [(), (2,), (2, 2), (2, 2, 2)]
sparse_dims = (6, 12)
for batch_dim in batch_dims:
a = make_tensor(batch_dim + sparse_dims, dtype=torch.float, device=device)
_to_from_layout(from_layout, to_layout, a)
@skipMeta
@all_sparse_compressed_layouts()
@batched_nonbatched()
@hybrid_nonhybrid()
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_dense_to_from_sparse_compressed(self, device, hybrid, batched, layout):
"""This test tests conversion from dense to/from CSR and CSC
by comparing to SciPy's implementation.
Here we test only those conversion combinations that SciPy
supports to ensure that PyTorch conversions are in the same
page with SciPy. Independent from SciPy, all conversion
combinations are tested in TestSparseAny.test_to_sparse.
"""
blocked_layouts = (torch.sparse_bsr, torch.sparse_bsc)
# helpers
def _check_against_scipy_matrix(pt_matrix, dense, blocksize, **kwargs):
# scipy has no bsc layout, so we check against the bsr layout of the transposed dense
if layout == torch.sparse_bsc:
sp_matrix = self._construct_sp_matrix(dense.t(), layout=torch.sparse_bsr, blocksize=blocksize[::-1])
else:
sp_matrix = self._construct_sp_matrix(dense, layout=layout, blocksize=blocksize)
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
self.assertEqual(layout, pt_matrix.layout)
if layout == torch.sparse_bsc:
self.assertEqual(sp_matrix.shape[::-1], pt_matrix.shape)
else:
self.assertEqual(sp_matrix.shape, pt_matrix.shape)
self.assertEqual(torch.tensor(sp_matrix.indptr, dtype=torch.int64), compressed_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.indices, dtype=torch.int64), plain_indices_mth(pt_matrix))
if layout == torch.sparse_bsc:
# we must transpose the blocks before comparing
self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values().transpose(-2, -1))
else:
self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values())
def _check_hybrid_matrix(pt_matrix, dense, blocksize, **kwargs):
# Calculate COO indices for sparse matrix.
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
compressed_indices = compressed_indices_mth(pt_matrix)
plain_indices = plain_indices_mth(pt_matrix)
coo_indices = torch._convert_indices_from_csr_to_coo(compressed_indices, plain_indices)
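# coo_indices has shape (2, nnz): row 0 holds the indices along the compressed
# dimension, row 1 the plain (non-compressed) indices, hence the layout-dependent
# row/col selection below.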
row_indices, col_indices = {
torch.sparse_csr: (coo_indices[0, ], coo_indices[1, ]),
torch.sparse_csc: (coo_indices[1, ], coo_indices[0, ]),
torch.sparse_bsr: (coo_indices[0, ], coo_indices[1, ]),
torch.sparse_bsc: (coo_indices[1, ], coo_indices[0, ]),
}[pt_matrix.layout]
# If the sparse matrix layout is blocked, rearrange the dense matrix
# so that the shape past the first two dimensions matches the
# shape of the sparse matrix values.
dense_to_check = dense
if blocksize:
dense_shape = dense.shape
dense_to_check_shape = (dense.shape[0] // blocksize[0],
blocksize[0],
dense.shape[1] // blocksize[1],
blocksize[1]) + dense.shape[2:]
dense_to_check = dense_to_check.reshape(dense_to_check_shape).transpose(1, 2)
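# e.g. for a (R, C, *H) dense tensor with blocksize (bR, bC) this yields shape
# (R // bR, C // bC, bR, bC, *H), so dense_to_check[row, col] lines up with the
# (bR, bC, *H)-shaped blocks stored in pt_matrix.values().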
# Verify that non-zero values of the sparse matrix are
# equal to corresponding values of the dense matrix.
self.assertEqual(pt_matrix.values(), dense_to_check[row_indices, col_indices])
# Verify that the remaining elements of the dense matrix
# are 0, i.e. that the dense and sparse matrices are fully
# equal.
mask = torch.ones_like(dense_to_check, dtype=torch.bool)
mask[row_indices, col_indices] = False
self.assertTrue(torch.all(torch.masked_select(dense_to_check, mask) == 0))
def _check_batched(pt_tensor, dense, check_batch=None, batch_shape=(), blocksize=(), **kwargs):
self.assertEqual(layout, pt_tensor.layout)
self.assertEqual(pt_tensor.shape, dense.shape)
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
for batch_index in np.ndindex(batch_shape):
pt_matrix = pt_tensor[batch_index]
dense_matrix = dense[batch_index]
dense_dim = pt_matrix.dim() - 2
dense_matrix_pt = dense_matrix.to_sparse(layout=layout,
blocksize=blocksize or None,
dense_dim=dense_dim)
# sanity check: selecting a batch from the to_<layout>() result and converting dense[batch] with to_<layout>() should give the same result
self.assertEqual(pt_matrix, dense_matrix_pt)
check_batch(pt_matrix, dense_matrix, blocksize, **kwargs)
def _generate_subject(sparse_shape, batch_shape, hybrid_shape):
shape = batch_shape + sparse_shape + hybrid_shape
n_batch_dim = len(batch_shape)
n_hybrid_dim = len(hybrid_shape)
# generate a dense tensor
dense = make_tensor(shape, dtype=torch.float, device=device)
# introduce some sparsity; the mask has the sparse shape, each element applies to an entire dense
# sub-tensor (hybrid) and is applied to each batch
mask = make_tensor(sparse_shape, dtype=torch.bool, device=device)
# manually expand to match hybrid shape
if hybrid:
mask = mask.view(sparse_shape + tuple(1 for _ in range(n_hybrid_dim)))
mask = mask.expand(sparse_shape + hybrid_shape)
# mask will broadcast over the batch dims if present
return dense * mask
# note: order is important here, the hybrid-ness decides the inner content check which is used to build the
# batched checker (if needed)
check_content = _check_against_scipy_matrix
if hybrid:
check_content = _check_hybrid_matrix
if batched:
check_content = functools.partial(_check_batched, check_batch=check_content)
sparse_sizes = [(6, 10), (0, 10), (6, 0), (0, 0)]
blocksizes = [(2, 2), (1, 1), (1, 2)] if layout in blocked_layouts else [()]
batch_sizes = [(3,), (1, 3), (2, 1, 3)] if batched else [()]
hybrid_sizes = [(4, ), (2, 2)] if hybrid else [()]
# general cases, always run
for sparse_shape, blocksize, batch_shape, hybrid_shape in itertools.product(
sparse_sizes, blocksizes, batch_sizes, hybrid_sizes):
dense = _generate_subject(sparse_shape, batch_shape, hybrid_shape)
sparse = dense.to_sparse(layout=layout, blocksize=blocksize or None, dense_dim=len(hybrid_shape))
check_content(sparse, dense, blocksize=blocksize, batch_shape=batch_shape, hybrid_shape=hybrid_shape)
dense_back = sparse.to_dense()
self.assertEqual(dense, dense_back)
# special cases for batched tensors
if batched:
# batched sparse tensors need only have the same number of non-zeros in each batch, not necessarily the
# same sparsity pattern in each batch
sparse_shape = sparse_sizes[0]
hybrid_shape = hybrid_sizes[0]
batch_shape = batch_sizes[0]
shape = batch_shape + sparse_shape + hybrid_shape
dense = make_tensor(shape, dtype=torch.float, device=device)
blocksize = blocksizes[0]
# number of elements/blocks in each batch (total not nnz)
batch_mask_shape = sparse_shape
if layout in blocked_layouts:
# if the layout is blocked, the mask is generated for the block-valued elements
batch_mask_shape = sparse_shape[0] // blocksize[0], sparse_shape[1] // blocksize[1]
# random bool vector w/ length equal to max possible nnz for the sparse_shape
mask_source = make_tensor(batch_mask_shape, dtype=torch.bool, device=device).flatten()
n_batch = functools.reduce(lambda x, y: x * y, batch_shape, 1)
# stack random permutations of the source for each batch
mask = torch.stack([mask_source[torch.randperm(mask_source.numel())]
for _ in range(n_batch)], dim=0).reshape(batch_shape + batch_mask_shape)
if layout in blocked_layouts:
# for blocked layouts we need to do a bit of extra work to expand the mask from block-space to element-space
mask_shape = mask.shape
mask = mask.view(mask_shape + (1, 1))
mask = mask.expand(mask_shape + blocksize)
mask = mask.transpose(-3, -2)
mask = mask.flatten(-4, -3).flatten(-2, -1)
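# shape walkthrough (illustration): (*batch, R // bR, C // bC)
#   -> view      (*batch, R // bR, C // bC, 1, 1)
#   -> expand    (*batch, R // bR, C // bC, bR, bC)
#   -> transpose (*batch, R // bR, bR, C // bC, bC)
#   -> flatten twice -> (*batch, R, C)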
mask_shape = mask.shape
mask = mask.view(mask_shape + (1,) * len(hybrid_shape))
mask = mask.expand(mask_shape + hybrid_shape)
dense = dense * mask
sparse = dense.to_sparse(layout=layout, blocksize=blocksize or None, dense_dim=len(hybrid_shape))
check_content(sparse, dense, blocksize=blocksize, batch_shape=batch_shape, hybrid_shape=hybrid_shape)
dense_back = sparse.to_dense()
self.assertEqual(dense, dense_back)
# if batches have different nnz we expect the conversion to throw
mask_0 = mask[0]
mask_1 = mask[0].clone().fill_(True)
mask_2 = mask[0].clone().fill_(False)
mask_true = mask_source.clone().fill_(True)
mask_false = mask_source.clone().fill_(False)
mask = torch.stack([(mask_0, mask_1, mask_2)[i % 3] for i in range(n_batch)], dim=0).reshape(batch_shape + mask_0.shape)
dense = make_tensor(shape, dtype=torch.float, device=device)
dense = dense * mask
msg = "Expect the same number of specified elements per batch."
with self.assertRaisesRegex(RuntimeError, msg):
dense.to_sparse(layout=layout, blocksize=blocksize or None)
# Should throw if any batch dimension is zero
dense = make_tensor((0,) + shape, dtype=torch.float, device=device)
layout_code = str(layout).split("_")[-1]
msg = f"to_sparse_{layout_code}: Expected product of batch dimensions to be non-zero."
with self.assertRaisesRegex(RuntimeError, msg):
dense.to_sparse(layout=layout, blocksize=blocksize or None)
@skipMeta
@all_sparse_compressed_layouts()
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_sparse_to_sparse_compressed(self, device, dtype, coalesced, layout):
"""
This test checks conversion from COO to CSR/CSC and from CSC to CSR/CSC
by comparing against SciPy's implementation.
Here we test only those conversion combinations that SciPy
supports, to ensure that PyTorch conversions are on the same
page as SciPy. Independently of SciPy, all conversion
combinations are tested in TestSparseAny.test_to_sparse.
"""
if layout is torch.sparse_bsc:
# TODO: Remove this once support has been enabled
self.skipTest('NOT IMPL')
if layout is torch.sparse_bsr:
# TODO: Remove this once support has been enabled
self.skipTest('NOT IMPL')
for shape in [(0, 10), (6, 0), (6, 10), (0, 0)]:
sparse_dim = 2
nnz = shape[0] * shape[1] // 2
sparse, _, _ = self.genSparseTensor(shape, sparse_dim, nnz, coalesced, device, dtype)
sp_matrix = self._construct_sp_matrix(sparse, layout)
pt_matrix = sparse.to_sparse(layout=layout)
compressed_indices_mth = {
torch.sparse_csr: torch.Tensor.crow_indices,
torch.sparse_csc: torch.Tensor.ccol_indices,
}[layout]
plain_indices_mth = {
torch.sparse_csr: torch.Tensor.col_indices,
torch.sparse_csc: torch.Tensor.row_indices,
}[layout]
self.assertEqual(layout, pt_matrix.layout)
self.assertEqual(sp_matrix.shape, pt_matrix.shape)
self.assertEqual(torch.tensor(sp_matrix.indptr, dtype=torch.int64), compressed_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.indices, dtype=torch.int64), plain_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values())
sparse_csc = sparse.to_sparse_csc()
sp_matrix = self._construct_sp_matrix(sparse_csc, layout)
pt_matrix = sparse_csc.to_sparse(layout=layout)
self.assertEqual(layout, pt_matrix.layout)
self.assertEqual(sp_matrix.shape, pt_matrix.shape)
self.assertEqual(torch.tensor(sp_matrix.indptr, dtype=torch.int64), compressed_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.indices, dtype=torch.int64), plain_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values())
# e.g., TestSparseCSRCPU and TestSparseCSRCUDA
instantiate_device_type_tests(TestSparseCSR, globals())
instantiate_device_type_tests(TestSparseCompressed, globals())
if __name__ == '__main__':
run_tests()
def _npref_block_addmm_addmv(c, a, b, alpha, beta):
return alpha * (a @ b) + beta * c
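# Dense/NumPy reference for addmm/addmv-style ops: out = beta * c + alpha * (a @ b),
# matching the semantics of torch.addmm and torch.addmv.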
class TestSparseCSR(TestCase):
def test_csr_stride(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, "Sparse CSR tensors do not have strides"):
a.stride()
with self.assertRaisesRegex(RuntimeError, "Sparse CSR tensors do not have strides"):
a.stride(-1)
def test_csr_storage(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, "Cannot access storage of SparseCsrTensorImpl"):
a.storage()
def test_csr_is_contiguous(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, "Sparse CSR tensors do not have is_contiguous"):
a.is_contiguous()
@onlyCPU
@largeTensorTest("20GB", "cpu")
def test_csr_nnz(self):
# Tests the limits of the number of specified elements in CSR tensors, see gh-102520.
for nnz in [0, 2**31]:
rows, cols = 1, max(nnz, 1)
crow_indices = torch.tensor([0, nnz], dtype=torch.int64)
col_indices = torch.arange(nnz, dtype=torch.int64)
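# with a single row, crow_indices == [0, nnz] places all nnz specified elements
# in that row; col_indices simply enumerates the columns 0..nnz-1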
values = torch.ones(nnz, dtype=torch.int8)
a = torch.sparse_csr_tensor(crow_indices, col_indices, values, (rows, cols))
self.assertEqual(a._nnz(), nnz)
def test_csr_double_to_sparse_csr(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
a.to_sparse_csr().to_sparse_csr()
@all_sparse_compressed_layouts()
@parametrize("index_dtype", [torch.int32, torch.int64])
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_select(self, device, dtype, index_dtype, layout):
compressed_indices_mth = {
torch.sparse_csr: torch.Tensor.crow_indices,
torch.sparse_bsr: torch.Tensor.crow_indices,
torch.sparse_csc: torch.Tensor.ccol_indices,
torch.sparse_bsc: torch.Tensor.ccol_indices,
}[layout]
plain_indices_mth = {
torch.sparse_csr: torch.Tensor.col_indices,
torch.sparse_bsr: torch.Tensor.col_indices,
torch.sparse_csc: torch.Tensor.row_indices,
torch.sparse_bsc: torch.Tensor.row_indices,
}[layout]
create_tensor_mth = {
torch.sparse_csr: torch.sparse_csr_tensor,
torch.sparse_bsr: torch.sparse_bsr_tensor,
torch.sparse_csc: torch.sparse_csc_tensor,
torch.sparse_bsc: torch.sparse_bsc_tensor,
}[layout]
shape = (2, 3, 6, 10)
nnz = 6
blocksize = (2, 2) if layout in {torch.sparse_bsr, torch.sparse_bsc} else ()
sparse = self.genSparseCompressedTensor(
shape, nnz, device=device, layout=layout, dtype=dtype, index_dtype=index_dtype, blocksize=blocksize)
comp_indices = compressed_indices_mth(sparse)
plain_indices = plain_indices_mth(sparse)
values = sparse.values()
# select from batch dimensions
sparse_selected12 = sparse.select(1, 2)
expected_sparse_selected12 = create_tensor_mth(comp_indices.select(1, 2).contiguous(),
plain_indices.select(1, 2).contiguous(),
values.select(1, 2).contiguous(),
size=(2, 6, 10),
dtype=dtype,
device=device)
self.assertEqual(expected_sparse_selected12, sparse_selected12)
# selecting rows/cols while batch dims are present is not allowed, so index out the batch dims first
sparse_non_batched = sparse[0, 0]
# select from sparse dimensions
for select_args in [(0, 0), (1, 1)]:
sparse_selected = sparse_non_batched.select(*select_args)
dense_selected = sparse_non_batched.to_dense().select(*select_args)
self.assertEqual(dense_selected, sparse_selected)
self.assertEqual(sparse[0, 0, 0, 0], sparse.to_dense()[0, 0, 0, 0])
# assigning to sparse through indexing is disabled
with self.assertRaisesRegex(TypeError, "Cannot assign to a sparse tensor"):
sparse[0, 0, 0, 0] = 99.0
# select from sparse dimensions without removing batch dims
msg = "selecting sparse dimensions is not supported for batched sparse compressed tensors."
with self.assertRaisesRegex(RuntimeError, msg):
sparse.select(-2, 0)
with self.assertRaisesRegex(RuntimeError, msg):
sparse.select(-1, 0)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_resize(self, device, dtype):
def numel(tensor):
r = 1
for s in tensor.shape:
r *= s
return r
batch_shapes = [(), (2,), (2, 3)]
for index_dtype, b in zip([torch.int32, torch.int64], batch_shapes):
shape = (*b, 2, 3)
nnz = 6
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
self.assertEqual(a.numel(), numel(a))
new_shape = (*b, 4, 5)
a.resize_(new_shape)
self.assertEqual(a.shape, new_shape)
# resize to larger shape doesn't add specified elements
self.assertEqual(a._nnz(), nnz)
self.assertEqual(a.numel(), numel(a))
new_shape = (*b, 1, 5)
a.resize_(new_shape)
self.assertEqual(a.shape, new_shape)
# resize to smaller shape trims specified elements
self.assertEqual(a._nnz(), 5)
self.assertEqual(a.numel(), numel(a))
# trim batched dimensions
a.resize_(new_shape[-2], new_shape[-1])
self.assertEqual(a.shape, (new_shape[-2], new_shape[-1]))
self.assertEqual(a._nnz(), 5)
self.assertEqual(a.numel(), numel(a))
@skipMeta
@dtypes(torch.float, torch.bool)
@all_sparse_compressed_layouts()
def test_resize_as_sparse_compressed(self, device, dtype, layout):
def _check_resize_b_as_a(b, a):
br = b.clone()
br.resize_as_sparse_(a)
# shape is inherited from a
self.assertEqual(a.shape, br.shape)
# other metadata is not affected
self.assertEqual(b.layout, br.layout)
self.assertEqual(b.device, br.device)
self.assertEqual(b.dtype, br.dtype)
def _get_compressed_plain_inds(t):
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[t.layout]
return compressed_indices_mth(t), plain_indices_mth(t)
br_compressed_indices, br_plain_indices = _get_compressed_plain_inds(br)
br_values = br.values()
b_compressed_indices, b_plain_indices = _get_compressed_plain_inds(b)
a_compressed_indices, a_plain_indices = _get_compressed_plain_inds(a)
self.assertEqual(a_plain_indices.shape, br_plain_indices.shape)
self.assertEqual(a_compressed_indices.shape, br_compressed_indices.shape)
# We don't check the content of br_plain_indices and br_compressed_indices
# because it is not well-defined (the content depends on the original
# shape of `b` that `resize_as` ought to discard) nor needed (the
# subsequent operation likely updates the indices and values of `b` anyway).
# the device/dtype of indices should always be unaffected
self.assertEqual(b_plain_indices.dtype, br_plain_indices.dtype)
self.assertEqual(b_plain_indices.device, br_plain_indices.device)
self.assertEqual(b_compressed_indices.dtype, br_compressed_indices.dtype)
self.assertEqual(b_compressed_indices.device, br_compressed_indices.device)
# values are generated empty, shape is updated
self.assertEqual(a.values().shape, br_values.shape)
# the device/dtype of values should always be unaffected
b_values = b.values()
self.assertEqual(b_values.dtype, br_values.dtype)
self.assertEqual(b_values.device, br_values.device)
# nnz will be picked up from a via new shape of values
self.assertEqual(a._nnz(), br._nnz())
# post resize the invariants of the layout are respected
torch._validate_sparse_compressed_tensor_args(br_compressed_indices, br_plain_indices, br_values, br.shape,
br.layout)
block_sparse = layout in (torch.sparse_bsr, torch.sparse_bsc)
shape = (2, 1, 6, 4)
nnz = 4
blocksize = (2, 1) if block_sparse else ()
for index_dtype in [torch.int32, torch.int64]:
a = self.genSparseCompressedTensor(shape,
layout=layout,
device=device,
index_dtype=index_dtype,
dtype=dtype,
nnz=nnz,
blocksize=blocksize)
# same size, resize should not trigger
b = self.genSparseCompressedTensor(shape,
layout=layout,
device=device,
index_dtype=index_dtype,
dtype=dtype,
nnz=nnz,
blocksize=blocksize)
# This call will not always trigger a resize; if the shapes already match, nothing should happen to b.
# The invariants of the function as checked should still hold.
_check_resize_b_as_a(b, a)
# same ndim, but bigger, more nnz, different dtype, different blocksize if blocked
b = self.genSparseCompressedTensor(tuple(s * 2 for s in shape),
layout=layout,
device=device,
dtype=torch.chalf,
index_dtype=torch.int64 if index_dtype == torch.int32 else torch.int32,
nnz=nnz * 2,
blocksize=tuple(2 * bi for bi in blocksize))
_check_resize_b_as_a(b, a)
# different device: only check on the CUDA pass, since then we know we are testing in an
# environment that has multiple devices
# TODO: .cpu() does not seem to work correctly for sparse. Causes a call to `copy_` which
# complains about incompatible nnz between src and self?
if torch.device(device).type == 'cuda' and (layout not in (torch.sparse_bsc, torch.sparse_bsr)):
a_cpu = self.genSparseCompressedTensor(shape,
layout=layout,
device='cpu',
index_dtype=index_dtype,
dtype=dtype,
nnz=nnz,
blocksize=blocksize)
_check_resize_b_as_a(b, a_cpu)
# error on a strided
a_strided = a.to_dense()
with self.assertRaisesRegex(
RuntimeError, r'resize_as_sparse_compressed_: src expected sparse compressed tensor layout'):
b.resize_as_sparse_(a_strided)
# error on b strided
b_strided = b.to_dense()
with self.assertRaisesRegex(
RuntimeError, r'resize_as_sparse_compressed_: self expected sparse compressed tensor layout'):
b_strided.resize_as_sparse_(a)
# error if layout does not match, transpose induces layout flip
with self.assertRaisesRegex(RuntimeError,
r"resize_as_sparse_compressed_tensor_: self and src must have the same layout"):
b.transpose(-2, -1).resize_as_sparse_(a)
with self.assertRaisesRegex(RuntimeError,
r"resize_as_sparse_compressed_tensor_: self and src must have the same layout"):
b.resize_as_sparse_(a.transpose(-2, -1))
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_resize_errors(self, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
shape = (2, 3)
nnz = 6
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "torch.resize_: Only batched sparse CSR matrices are supported"):
new_shape = (4,)
a.resize_(new_shape)
# resizing of columns to smaller size is not implemented
with self.assertRaisesRegex(
RuntimeError,
"torch.resize_: Resizing columns of sparse CSR tensors to a smaller value is not supported.",
):
new_shape = (2, 2)
a.resize_(new_shape)
@skipIfTorchDynamo()
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_csr_from_dense(self, device, dtype):
dense = torch.tensor([[4, 5, 0], [0, 0, 0], [1, 0, 0]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 2, 2, 3], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 0], dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([4, 5, 1], dtype=dtype), sparse.values())
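# reading the CSR encoding above: crow_indices [0, 2, 2, 3] means row 0 holds
# entries [0, 2), row 1 holds [2, 2) (empty), and row 2 holds [2, 3); col_indices
# and values list those entries in row-major order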
dense = torch.tensor([[0, 0, 0], [0, 0, 1], [1, 0, 0]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 0, 1, 2], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([2, 0], dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([1, 1], dtype=dtype), sparse.values())
dense = torch.tensor([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 3, 6, 9], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 2] * 3, dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([2] * 9, dtype=dtype), sparse.values())
def _test_sparse_compressed_to_dense(self, device, dtype, layout):
compressed_format_str = str(layout)[-3:]
def to_compressed(t):
return getattr(t, f"to_sparse_{compressed_format_str}")()
def compressed_constructor(*input, **kwargs):
constructor = getattr(torch, f"sparse_{compressed_format_str}_tensor")
return constructor(*input, **kwargs)
def get_dense_shape(shape, batch_ndim):
if layout is torch.sparse_csc:
compressed_dims_slice = slice(batch_ndim + 1, batch_ndim - 1, -1)
else:
compressed_dims_slice = slice(batch_ndim, batch_ndim + 2)
return shape[:batch_ndim] + shape[compressed_dims_slice] + shape[batch_ndim + 2:]
def transpose(t, batch_ndim):
if layout is torch.sparse_csc:
return t.transpose(batch_ndim, batch_ndim + 1)
return t
mn = [5, 2, 0]
for (m, n) in itertools.product(mn, mn):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
sparse = to_compressed(dense)
self.assertEqual(sparse.to_dense(), dense)
batch_shape = (2, 3)
compressed_indices = torch.tensor([0, 3, 5], device=device).repeat(6, 1).reshape(*batch_shape, -1)
plain_indices = torch.tensor([0, 1, 2, 0, 1], device=device).repeat(6, 1).reshape(*batch_shape, -1)
values = torch.tensor([1, 2, 1, 3, 4], device=device, dtype=dtype).repeat(6, 1).reshape(*batch_shape, -1)
sparse = compressed_constructor(compressed_indices, plain_indices, values, dtype=dtype, device=device)
dense_shape = get_dense_shape(sparse.shape, len(batch_shape))
dense = torch.tensor([[1, 2, 1], [3, 4, 0]], dtype=dtype, device=device).repeat(6, 1).reshape(dense_shape)
self.assertEqual(sparse.to_dense(), transpose(dense, len(batch_shape)))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_csr_to_dense(self, device, dtype):
self._test_sparse_compressed_to_dense(device, dtype, torch.sparse_csr)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_csc_to_dense(self, device, dtype):
self._test_sparse_compressed_to_dense(device, dtype, torch.sparse_csc)
@skipMeta
@skipCPUIfNoMklSparse
@coalescedonoff
@dtypes(torch.double)
def test_coo_to_csr_convert(self, device, dtype, coalesced):
with self.assertRaisesRegex(RuntimeError, "Input is supposed to be a vector"):
torch._convert_indices_from_coo_to_csr(
torch.randint(100, (5, 5), device=device),
size=100)
size = (5, 5)
sparse_dim = 2
nnz = 10
sparse_coo, _, _ = self.genSparseTensor(size, sparse_dim, nnz, coalesced, device, dtype)
sparse_csr = sparse_coo.to_sparse_csr()
self.assertTrue(sparse_csr.is_sparse_csr)
self.assertEqual(sparse_csr.to_dense(), sparse_coo.to_dense())
vec = torch.randn((5, 1), dtype=dtype, device=device)
coo_product = sparse_coo.matmul(vec)
csr_product = sparse_csr.matmul(vec)
self.assertEqual(coo_product, csr_product)
vec = torch.randn((100, 1), dtype=dtype, device=device)
index = torch.tensor([
[1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
[92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
], dtype=torch.int32)
values = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)
coo = torch.sparse_coo_tensor(index, values, torch.Size([100, 100]), dtype=dtype, device=device)
csr = coo.to_sparse_csr()
self.assertEqual(coo.matmul(vec), csr.matmul(vec))
col_indices = torch.tensor([
31, 92, 65, 50, 34, 62, 22, 56, 74, 89
], dtype=torch.int64, device=device)
self.assertEqual(csr.col_indices(), col_indices)
values = torch.tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7], dtype=dtype, device=device)
self.assertEqual(csr.values(), values)
@parametrize("blocksize", [2, 4])
@dtypes((torch.double, torch.int32), (torch.double, torch.int64))
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@skipMeta
def test_csr_to_block_csr(self, device, dtypes, blocksize):
for shape in [(24, 24), (12, 24)]:
dtype, index_dtype = dtypes
m, k = shape
nnz = random.randint(0, m * k)
t = self.genSparseCSRTensor((m * blocksize, k * blocksize), nnz, dtype=dtype,
device=device, index_dtype=index_dtype)
st = sp.csr_matrix((t.values().cpu(), t.col_indices().cpu(), t.crow_indices().cpu()), shape=tuple(t.size()))
block_t = t.to_sparse_bsr((blocksize, blocksize))
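# BSR stores one (blocksize x blocksize) dense block per specified block, so
# block_t.values() is expected to be 3-dimensional: (num_blocks, blocksize, blocksize)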
self.assertEqual(block_t.values().dim(), 3)
self.assertTrue(block_t.layout == torch.sparse_bsr)
block_st = st.tobsr(blocksize=(blocksize, blocksize))
block_st.sort_indices()
self.assertEqual(block_t.values().cpu(), block_st.data)
self.assertEqual(block_t.col_indices().cpu(), torch.tensor(block_st.indices).to(index_dtype))
self.assertEqual(block_t.crow_indices().cpu(), torch.tensor(block_st.indptr).to(index_dtype))
@dtypes(torch.double)
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_csr_to_block_csr_errors(self, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
nnz = 15
t = self.genSparseCSRTensor((16, 16), nnz, dtype=dtype,
device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError,
r"tensor sparse size \(.*,.*\) must be divisible by given blocksize \(.*,.*\)"):
block_t = t.to_sparse_bsr((5, 5))
# TODO: Support auto generation of device check for sparse tensors
# See: https://github.com/pytorch/pytorch/issues/59058
@onlyCUDA
@dtypes(torch.double)
def test_matmul_device_mismatch(self, device, dtype):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
for s, m1, m2 in itertools.product((cpu, cuda), repeat=3):
csr = m1.to_sparse()
if s.device == csr.device == m2.device:
torch.addmm(s, csr, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, csr, m2)
@skipCPUIfNoMklSparse
@skipCUDAIfNoSparseGeneric
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater else [],
*[torch.bfloat16] if SM80OrLater else []))
def test_csr_matvec(self, device, dtype):
if TEST_WITH_ROCM and (dtype == torch.half or dtype == torch.bfloat16):
self.skipTest("ROCm doesn't work with half dtypes correctly.")
side = 100
for index_dtype in [torch.int32, torch.int64]:
csr = self.genSparseCSRTensor((side, side), 1000, device=device, dtype=dtype, index_dtype=index_dtype)
vec = torch.randn(side, dtype=dtype, device=device)
res = csr.matmul(vec)
expected = csr.to_dense().matmul(vec)
self.assertEqual(res, expected)
bad_vec = torch.randn(side + 10, dtype=dtype, device=device)
err_msg = "size mismatch, got"
with self.assertRaisesRegex(RuntimeError, err_msg):
csr.matmul(bad_vec)
@onlyCUDA
# note: the test passes OK on CUDA when ROCm is not available:
@skipCUDAIfRocmVersionLessThan((5, 2))
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_baddbmm(self, device, dtype):
# TODO: disable the invariant checks within torch.baddbmm that
# constructs unconventional csr tensors leading to
# RuntimeError: tensor dimensionality must be sum of batch,
# base, and dense dimensionalities (=0 + 2 + 0) but got 3
# when invariant checking is enabled. When done, undecorate run_test.
@torch.sparse.check_sparse_tensor_invariants(enable=False)
def run_test(c, a, a_batched, b, op_b=False, op_out=False, *, dtype=None, device=None):
alpha = complex(random.random(), random.random()) if dtype.is_complex else random.random()
beta = complex(random.random(), random.random()) if dtype.is_complex else random.random()
b = b.mH if (op_b and a.shape == b.shape) else b
actual = torch.baddbmm(c, a_batched, b, alpha=alpha, beta=beta)
out = torch.empty_like(c.mH if op_out and a.shape == b.shape else c)
torch.baddbmm(c, a_batched, b, alpha=alpha, beta=beta, out=out)
expected = [torch.addmm(c[i], a, b[i], alpha=alpha, beta=beta) for i in range(c.shape[0])]
expected = torch.stack(expected, 0)
self.assertEqual(actual, out)
self.assertEqual(actual, expected)
for index_dtype in [torch.int32, torch.int64]:
for (m, n, k), batch_size, noncontiguous in zip(itertools.product([2, 5], repeat=3), [1, 3], [True, False]):
nnz = random.randint(0, m * k)
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
# a_batched is a regular CSR tensor but with a batch dimension in the shape
a_batched = torch.sparse_csr_tensor(
a.crow_indices(), a.col_indices(), a.values(), (batch_size, m, k), check_invariants=False)
b = make_tensor((batch_size, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
c = make_tensor((batch_size, m, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
for op_b, op_out in itertools.product([True, False], repeat=2):
run_test(c, a, a_batched, b, op_b, op_out, dtype=dtype, device=device)
@onlyCUDA
@unittest.skipIf(TEST_WITH_ROCM, "Only CUDA 11+ is supported")
@skipCUDAIfNoSparseGeneric
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_bmm(self, device, dtype):
def run_test(a, a_batched, b, op_b=False, op_out=False, *, dtype=None, device=None):
b = b.mH if (op_b and a.shape == b.shape) else b
actual = torch.bmm(a_batched, b)
out = torch.empty_like(actual.mH if op_out and a.shape == b.shape else actual)
torch.bmm(a_batched, b, out=out)
expected = [torch.mm(a, b[i]) for i in range(b.shape[0])]
expected = torch.stack(expected, 0)
self.assertEqual(actual, out)
self.assertEqual(actual, expected)
for index_dtype in [torch.int32, torch.int64]:
for (m, n, k), batch_size, noncontiguous in zip(itertools.product([2, 5], repeat=3), [1, 3], [True, False]):
nnz = random.randint(0, m * k)
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
# a_batched is a regular CSR tensor but with a batch
# dimension in the shape. It is unorthodox in PyTorch
# to represent a batch sparse tensor in this way,
# hence checking the tensor invariants is locally
# turned off.
a_batched = torch.sparse_csr_tensor(
a.crow_indices(), a.col_indices(), a.values(), (batch_size, m, k), check_invariants=False)
b = make_tensor((batch_size, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
for op_b, op_out in itertools.product([True, False], repeat=2):
run_test(a, a_batched, b, op_b, op_out, dtype=dtype, device=device)
def run_test_block_addmm_addmv(self,
addmv_addmm,
c,
a,
b,
op_b=False,
op_out=False,
*,
dtype=None,
device=None,
ref=_npref_block_addmm_addmv):
alpha = complex(random.random(), random.random()) if dtype.is_complex else random.random()
beta = complex(random.random(), random.random()) if dtype.is_complex else random.random()
b = b.mH if (op_b and a.shape == b.shape) else b
actual = addmv_addmm(c, a, b, alpha=alpha, beta=beta)
out = torch.empty_like(c.mH if op_out and a.shape == b.shape else c)
addmv_addmm(c, a, b, alpha=alpha, beta=beta, out=out)
expected = ref(c, a, b, alpha, beta)
self.assertEqual(actual, out)
self.assertEqual(actual, expected, lambda msg: f"{msg}\na={a}\nc={c}\nb={b}\nalpha={alpha} beta={beta}")
# TODO: block_size 1 is broken
@parametrize("block_size", [2, 3])
@parametrize("index_dtype", [torch.int32, torch.int64])
@parametrize("noncontiguous", [True, False])
@skipCPUIfNoMklSparse
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@skipIfTorchDynamo("raises 'sparse matrix length is ambiguous; use getnnz()'")
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater else [],
*[torch.bfloat16] if SM80OrLater else []))
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-5, torch.complex128: 1e-5,
torch.float16: 1e-3, torch.bfloat16: 1e-3})
def test_block_addmm(self, device, dtype, index_dtype, block_size, noncontiguous):
def make_transposed_addmm_op(f):
def tt(t):
if isinstance(t, torch.Tensor):
return t.transpose(-2, -1)
else:
# assume numpy/scipy spmatrix
return t.transpose()
@functools.wraps(f)
def wrapper(c, a, b, alpha=None, beta=None, out=None):
if out is not None:
# the ref takes no out kwarg
assert isinstance(out, torch.Tensor)
# transpose inplace to propagate out to checking context
out.transpose_(-2, -1)
return f(tt(c), tt(b), tt(a), alpha=alpha, beta=beta, out=out)
else:
return f(tt(c), tt(b), tt(a), alpha=alpha, beta=beta)
return wrapper
def ref_sp_numpy(c, a, b, alpha=None, beta=None, out=None):
def prep_input(t):
def to_sp_block_compressed(t):
if t.layout is torch.sparse_bsc:
tt = t.transpose(-1, -2)
else:
tt = t
t_sp_bsr = sp.bsr_matrix(
(
tt.values().cpu().numpy(),
tt.col_indices().cpu().numpy(),
tt.crow_indices().cpu().numpy(),
),
shape=tt.shape,
)
if t.layout is torch.sparse_bsc:
return t_sp_bsr.transpose()
else:
return t_sp_bsr
if t.layout is not torch.strided:
return to_sp_block_compressed(t)
else:
return t.cpu().resolve_conj().numpy()
res = _npref_block_addmm_addmv(
*(prep_input(t) for t in (c, a, b)),
alpha,
beta
)
if out is not None:
out.copy_(res)
return out
else:
return res
def ref_half_bfloat16(c, a, b, alpha=None, beta=None, out=None):
res = alpha * (a.to_dense().to(torch.float32) @ b.to_dense().to(torch.float32)).to(a.dtype) + beta * c
if out is not None:
out.copy_(res)
return out
else:
return res
if dtype in (torch.half, torch.bfloat16):
ref = ref_half_bfloat16
else:
ref = ref_sp_numpy
for (m, n, k) in itertools.product([2, 5], repeat=3):
nnz = random.randint(0, m * k)
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data
a = torch.sparse_bsr_tensor(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, k * block_size), check_invariants=False)
b = make_tensor((k * block_size, n * block_size), dtype=dtype, device=device, noncontiguous=noncontiguous)
c = make_tensor((m * block_size, n * block_size), dtype=dtype, device=device, noncontiguous=noncontiguous)
for op_b, op_out in itertools.product([True, False], repeat=2):
self.run_test_block_addmm_addmv(torch.addmm, c, a, b, op_b, op_out, dtype=dtype, device=device, ref=ref)
self.run_test_block_addmm_addmv(make_transposed_addmm_op(torch.addmm),
c,
a,
b,
op_b,
op_out,
dtype=dtype,
device=device,
ref=make_transposed_addmm_op(ref))
@parametrize("block_size", [2, 3])
@parametrize("index_dtype", [torch.int32, torch.int64])
@parametrize("noncontiguous", [True, False])
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_block_addmv(self, device, dtype, index_dtype, block_size, noncontiguous):
# TODO: Explicitly disable block size 1 support
# if (TEST_WITH_ROCM or not TEST_CUSPARSE_GENERIC) and block_size == 1:
# return
def ref_block_addmv(c, a, b, alpha, beta):
return _npref_block_addmm_addmv(c, a.to_dense(), b, alpha, beta)
for (m, k) in itertools.product([2, 5], repeat=2):
nnz = random.randint(0, m * k)
if not noncontiguous:
a = self.genSparseCSRTensor((m * block_size, k * block_size), nnz,
dtype=dtype, device=device, index_dtype=index_dtype)
a = a.to_sparse_bsr((block_size, block_size))
else:
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data # Test column-major blocks
a = torch.sparse_bsr_tensor(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, k * block_size), check_invariants=False)
b = make_tensor((k * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
c = make_tensor((m * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
self.run_test_block_addmm_addmv(torch.addmv, c, a, b, dtype=dtype, device=device, ref=ref_block_addmv)
@parametrize("matrix_shape", [(3, 3), (5, 7), (11, 9)], name_fn=lambda x: "shape_{}x{}".format(*x))
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@onlyCPU
def test_addmv(self, device, dtype, matrix_shape):
mat = torch.randn(matrix_shape, dtype=dtype, device=device)
mat[mat.real < 0] = 0
sparse_mat = mat.to_sparse_csr()
mvec = torch.randn((mat.size(1),), dtype=dtype, device=device)
avec = torch.randn((mat.size(0),), dtype=torch.float64, device=device)
ref_output = torch.addmv(avec, mat, mvec)
output = torch.addmv(avec, sparse_mat, mvec)
self.assertEqual(ref_output, output)
@parametrize("block_size", [2, 3])
@parametrize("index_dtype", [torch.int32, torch.int64])
@parametrize("noncontiguous", [True, False])
@skipCPUIfNoMklSparse
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_block_triangular_solve(self, device, dtype, index_dtype, block_size, noncontiguous):
def run_test(a, b, upper, transpose, unitriangular, op_out):
if unitriangular and self.device_type == 'cpu':
# TODO: When unitriangular=True results are not correct on CPU
return
if not upper and self.device_type == 'cpu':
# TODO: When upper=False some generated inputs might crash on CPU
return
actual = torch.triangular_solve(b, a, upper=upper, unitriangular=unitriangular, transpose=transpose)
actual_X = actual.solution
actual_A_clone = actual.cloned_coefficient
self.assertTrue(actual_A_clone.numel() == 0)
if a._nnz() == 0:
self.assertTrue(actual_X.isnan().all())
return
# TODO: replace with torch method when implemented to_dense() on block sparse tensor
a_bsr = sp.bsr_matrix(
(
a.values().cpu().numpy(),
a.col_indices().cpu().numpy(),
a.crow_indices().cpu().numpy(),
),
shape=a.shape,
)
expected_X, _ = torch.triangular_solve(
b,
torch.tensor(a_bsr.todense(), device=device),
transpose=transpose,
upper=upper,
unitriangular=unitriangular)
if expected_X.isnan().any():
# TODO: zeros on the diagonal are not handled for CPU path
# there's no way to query this info from MKL
if self.device_type == 'cuda' and not TEST_WITH_ROCM:
self.assertTrue(actual_X.isnan().any() or actual_X.isinf().any())
return
self.assertEqual(actual_X, expected_X)
out = torch.empty_like(b.mH if op_out and a.shape == b.shape else b)
torch.triangular_solve(
b, a,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, actual_X)
self.assertEqual(out, expected_X)
for (m, k) in itertools.product([2, 3], [1, 3]):
nnz = random.randint(0, m * m)
if not noncontiguous:
a = self.genSparseCSRTensor((m * block_size, m * block_size), nnz,
dtype=dtype, device=device, index_dtype=index_dtype)
a = a.to_sparse_bsr((block_size, block_size))
else:
a = self.genSparseCSRTensor((m, m), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data # Test column-major blocks
a = torch.sparse_bsr_tensor(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, m * block_size), check_invariants=False)
b = make_tensor((m * block_size, k), dtype=dtype, device=device, noncontiguous=noncontiguous)
for (upper, unitriangular, transpose, op_out) in itertools.product([True, False], repeat=4):
run_test(a, b, upper, transpose, unitriangular, op_out)
@skipCPUIfNoMklSparse
@unittest.skipIf(TEST_WITH_ROCM, "Only CUDA 11+ is supported")
@dtypes(torch.double)
def test_mm(self, device, dtype):
def test_shape(di, dj, dk, nnz0=None, nnz1=None):
for index_dtype in [torch.int32, torch.int64]:
alpha = random.random()
beta = random.random()
def _test_addmm(t, x, y):
# TODO: addmm doesn't support strided result for sparse inputs.
# res = beta * t + alpha * (x @ y)
res = torch.addmm(t, x, y, beta=beta, alpha=alpha)
expected = torch.addmm(t, x.to_dense(), y.to_dense(), beta=beta, alpha=alpha)
self.assertEqual(res, expected)
res = torch.addmm(t, x, y)
expected = torch.addmm(t, x.to_dense(), y.to_dense())
self.assertEqual(res, expected)
def _test_mm(x, y):
res = torch.mm(x, y)
expected = torch.mm(x.to_dense(), y.to_dense())
if x.layout is torch.strided or y.layout is torch.strided:
self.assertEqual(res.layout, torch.strided)
else:
self.assertEqual(res.layout, torch.sparse_csr)
self.assertEqual(res.to_dense(), expected)
def _test(t, x, y):
_test_addmm(t, x, y)
_test_mm(x, y)
if nnz0 is None:
nnz0 = random.randint(di * dk // 2, di * dk)
t = torch.randn(di, dj, dtype=dtype, device=device)
x = self.genSparseCSRTensor((di, dk), nnz0, device=device, dtype=dtype, index_dtype=index_dtype)
y = torch.randn(dk, dj, dtype=dtype, device=device)
_test(t, x, y)
t = torch.randn(di, dj, dtype=dtype, device=device)
x = self.genSparseCSCTensor((di, dk), nnz0, device=device, dtype=dtype, index_dtype=index_dtype)
y = torch.randn(dk, dj, dtype=dtype, device=device)
_test(t, x, y)
if nnz1 is None:
nnz1 = random.randint(dk * dj // 2, dk * dj)
t = torch.randn(di, dj, dtype=dtype, device=device)
x = torch.randn(di, dk, dtype=dtype, device=device)
y = self.genSparseCSRTensor((dk, dj), nnz1, device=device, dtype=dtype, index_dtype=index_dtype)
_test(t, x, y)
t = torch.randn(di, dj, dtype=dtype, device=device)
x = torch.randn(di, dk, dtype=dtype, device=device)
y = self.genSparseCSCTensor((dk, dj), nnz1, device=device, dtype=dtype, index_dtype=index_dtype)
_test(t, x, y)
x_shape, y_shape = x.shape, y.shape
gen_csr_csc = [self.genSparseCSRTensor, self.genSparseCSCTensor]
# Test mm({CSR, CSC}, {CSR, CSC})
for gen_x, gen_y in itertools.product(gen_csr_csc, gen_csr_csc):
x = gen_x(x_shape, nnz0, device=device, dtype=dtype, index_dtype=index_dtype)
y = gen_y(y_shape, nnz1, device=device, dtype=dtype, index_dtype=index_dtype)
_test_mm(x, y)
def test_empty_inputs(lhs_layout, rhs_layout):
xd = torch.rand(10, 0, device=device, dtype=dtype)
yd = xd.transpose(-2, -1)
zd = torch.rand(0, 0, device=device, dtype=dtype)
xls, yls, zls = (t.to_sparse(layout=lhs_layout) for t in (xd, yd, zd))
xrs, yrs, zrs = (t.to_sparse(layout=rhs_layout) for t in (xd, yd, zd))
for ls, rs, ld, rd in [(xls, yrs, xd, yd), (xls, zrs, xd, zd), (zls, yrs, zd, yd), (zls, zrs, zd, zd)]:
res_sparse = ls @ rs
res_dense = ld @ rd
self.assertEqual(res_sparse.to_dense(), res_dense)
def test_orthogonal_inputs(lhs_layout, rhs_layout):
ones = torch.ones(2, 2, device=device, dtype=dtype)
zeros = torch.zeros(2, 2, device=device, dtype=dtype)
x = torch.cat((ones, zeros), -1).to_sparse(layout=lhs_layout)
y = torch.cat((zeros, ones), -2).to_sparse(layout=rhs_layout)
res = x @ y
res_expected = torch.zeros(*res.shape, device=device, dtype=dtype, layout=res.layout)
self.assertEqual(res, res_expected)
for lhs_layout, rhs_layout in itertools.product([torch.sparse_csr, torch.sparse_csc], repeat=2):
test_empty_inputs(lhs_layout, rhs_layout)
test_orthogonal_inputs(lhs_layout, rhs_layout)
for i in [2, 4]:
for j in [2, 4, 7]:
for k in [2, 3, 7]:
test_shape(i, j, k)
test_shape(4, 4, 4, 0, 0)
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
*[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_mm(self, device, dtype):
def test_shape(d1, d2, d3, nnz, transposed, index_dtype):
if transposed:
D = torch.randn(d3, d2, dtype=dtype, device=device).t_()
else:
D = torch.randn(d2, d3, dtype=dtype, device=device)
S = self.genSparseCSRTensor((d1, d2), nnz, device=device, dtype=dtype, index_dtype=index_dtype)
S_dense = S.to_dense()
self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
for index_dtype in [torch.int32, torch.int64]:
test_shape(7, 8, 9, 20, False, index_dtype)
test_shape(7, 8, 9, 20, True, index_dtype)
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
*[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_addmm(self, device, dtype):
def test_shape(m, n, p, nnz, broadcast, index_dtype, alpha_beta=None):
if alpha_beta is None:
alpha = random.random()
beta = random.random()
else:
alpha, beta = alpha_beta
if broadcast:
D1 = make_tensor((), dtype=dtype, device=device)
else:
D1 = make_tensor([n, p], dtype=dtype, device=device)
D2 = make_tensor([m, p], dtype=dtype, device=device)
S = self.genSparseCSRTensor([n, m], nnz, dtype=dtype, device=device, index_dtype=index_dtype)
S_dense = S.to_dense()
Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
self.assertEqual(Y, Y_dense)
for index_dtype in [torch.int32, torch.int64]:
test_shape(7, 8, 9, 20, False, index_dtype, None)
test_shape(7, 8, 9, 20, True, index_dtype, None)
test_shape(7, 8, 9, 20, False, index_dtype, (1, 0))
test_shape(7, 8, 9, 20, True, index_dtype, (1, 0))
test_shape(7, 8, 9, 20, False, index_dtype, (1, 1))
test_shape(7, 8, 9, 20, True, index_dtype, (1, 1))
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*floating_types_and(torch.complex64,
*[torch.bfloat16] if SM80OrLater else [],
*[torch.half] if SM53OrLater else [],
*[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
@sparse_compressed_nonblock_layouts()
@skipCUDAIf(
not _check_cusparse_spgemm_available(),
"cuSparse Generic API SpGEMM is not available"
)
def test_addmm_all_sparse_csr(self, device, dtype, layout):
M = torch.randn(10, 25, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=layout, mode="all_sparse")
# Test 0-strided
M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=layout, mode="all_sparse")
# Test beta=0, M=nan
M = torch.full((10, 25), float('nan'), device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, beta=0, layout=layout, mode="all_sparse")
# Test transpose
for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
_test_addmm_addmv(self, torch.addmm, M, m1, m2, transpose_out=t4, layout=layout, mode="all_sparse")
@onlyCPU
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@sparse_compressed_nonblock_layouts()
def test_addmm_dense_result(self, device, dtype, layout):
M = torch.randn(10, 25, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=layout, mode="dense_result")
# Test 0-strided
M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=layout, mode="dense_result")
# Test beta=0, M=nan
M = torch.full((10, 25), float('nan'), device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, beta=0, layout=layout, mode="dense_result")
# Test transpose
for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
_test_addmm_addmv(self, torch.addmm, M, m1, m2, transpose_out=t4, layout=layout, mode="dense_result")
@parametrize("k", [0, 1, 8])
@parametrize("n", [0, 1, 10])
@parametrize("m", [0, 1, 25])
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_types_and(torch.complex64,
*[torch.bfloat16] if SM80OrLater else [],
*[torch.half] if SM53OrLater else [],
*[torch.complex128]
if CUSPARSE_SPMM_COMPLEX128_SUPPORTED or HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
else []))
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
def test_addmm_sizes_all_sparse_csr(self, device, dtype, m, n, k):
if (TEST_WITH_ROCM and k != 0 and n != 0 and m != 0):
self.skipTest("Skipped on ROCm")
M = torch.randn(n, m, device=device).to(dtype)
m1 = torch.randn(n, k, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="all_sparse")
M = torch.randn(n, m, device=device).to(dtype).to_sparse_csr()
m1 = torch.randn(n, k + 1, device=device).to(dtype).to_sparse_csr()
m2 = torch.randn(k, m, device=device).to(dtype).to_sparse_csr()
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@skipCPUIfNoMklSparse
@dtypes(torch.float)
def test_addmm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a)
else:
return torch.addmm(a, a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a.unsqueeze(0))
else:
return torch.addmm(a, a, a.unsqueeze(0))
def test3(*, is_sparse):
# the first input needs to be 1D or 2D
a = make_tensor((3, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a.unsqueeze(0), a_sparse, a)
else:
return torch.addmm(a.unsqueeze(0), a, a)
for test in (test1, test2, test3):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@skipCPUIfNoMklSparse
@dtypes(torch.float)
def test_mm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a)
else:
return torch.mm(a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a.unsqueeze(0))
else:
return torch.mm(a, a.unsqueeze(0))
for test in (test1, test2):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@sparse_compressed_nonblock_layouts()
@dtypes(torch.float, torch.double)
def test_add(self, device, layout, dtype):
def _test_spadd_shape(nnz, shape):
# sparse.to_dense() uses torch.add internally, so if torch.add is wrong,
# the dense tensor would be wrong but this test would still pass;
# there's a separate test that checks the correctness of the .to_dense() call.
# (See the illustrative sketch after this test for the expected torch.add semantics.)
x = self.genSparseCompressedTensor(shape, nnz,
dtype=dtype,
device=device,
index_dtype=torch.int32,
layout=layout,
blocksize=())
y = torch.randn(*shape, dtype=dtype, device=device)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
res_perm = torch.add(x, y, alpha=r)
self.assertEqual(res_perm, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = torch.randn(*s, dtype=torch.double, device=device)
y.transpose_(0, len(s) - 1)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
res_perm = torch.add(x, y, alpha=r)
self.assertEqual(res, expected)
self.assertEqual(res_perm, expected)
ns = [2, 5]
batch_shapes = [(), (2,), (2, 3)]
for b, m, n in itertools.product(batch_shapes, ns, ns):
_test_spadd_shape(0, (*b, m, n))
_test_spadd_shape(m * n // 2, (*b, m, n))
_test_spadd_shape(m * n, (*b, m, n))
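# --- Illustrative sketch (not part of the original test) ---
# A minimal, standalone example of the torch.add semantics exercised above: adding a sparse
# compressed operand to a dense tensor with a scaling factor. Names (`dense`, `csr`, `res`,
# `expected`) are hypothetical and the snippet assumes a plain CPU float build.
#   >>> import torch
#   >>> dense = torch.ones(2, 2)
#   >>> csr = torch.tensor([[0., 3.], [4., 0.]]).to_sparse_csr()
#   >>> res = torch.add(dense, csr, alpha=0.5)
#   >>> expected = dense + 0.5 * csr.to_dense()
#   >>> # `res` and `expected` agree elementwise (assertEqual above compares across layouts)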
@dtypes(torch.float, torch.double)
def test_mul(self, device, dtype):
# TODO: This whole test should be migrated to OpInfos
def _test_spadd_shape(fn, nnz, shape):
x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
y = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
# Forward comparison
res_sparse_sparse = fn(y, x)
res_dense_sparse = fn(y.to_dense(), x)
res_sparse_dense = fn(y, x.to_dense())
expected = fn(y.to_dense(), x.to_dense())
self.assertEqual(res_sparse_sparse, expected)
# TODO: While result of mul(dense, csr) is csr, it is not fully compressed.
# That means it may contain materialized zeros, since the dense argument
# is converted according to the sparsity pattern of csr. In the future
# we might require the result to be fully compressed.
self.assertEqual(res_dense_sparse, expected)
self.assertEqual(res_sparse_dense, expected)
# Grad comparison
x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
y = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
z = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
# csr * csr -> csr with csr, csr gradients
x_a = x.clone().requires_grad_()
y_a = y.clone().requires_grad_()
fn(y_a, x_a).backward(z)
x_dense_a = x.to_dense().requires_grad_()
y_dense_a = y.to_dense().requires_grad_()
fn(y_dense_a, x_dense_a).backward(z.to_dense())
self.assertEqual(x_a.grad.layout, torch.sparse_csr)
self.assertEqual(y_a.grad.layout, torch.sparse_csr)
self.assertEqual(x_a.grad.to_dense(), x_dense_a.grad)
self.assertEqual(y_a.grad.to_dense(), y_dense_a.grad)
# TODO: Currently strided Tensors cannot have csr gradients
# dense * csr -> csr with csr, dense gradients
x_a = x.clone().requires_grad_()
y_a = y.to_dense().clone().requires_grad_()
err_msg = "Function MulBackward0 returned an invalid gradient at index 0 - expected layout Strided but got SparseCsr"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(y_a, x_a).backward(z)
# csr * dense -> csr with dense, csr gradients
x_a = x.to_dense().clone().requires_grad_()
y_a = y.clone().requires_grad_()
err_msg = "Function MulBackward0 returned an invalid gradient at index 1 - expected layout Strided but got SparseCsr"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(y_a, x_a).backward(z)
_test_spadd_shape(torch.mul, 100, [100, 100])
_test_spadd_shape(torch.mul, 0, [100, 100])
_test_spadd_shape(torch.mul, 100, [100, 1])
_test_spadd_shape(torch.mul, 100, [1, 100])
# TODO: enable hybrid once to_dense supports it
@parametrize('enable_hybrid', [False])
@all_sparse_compressed_layouts()
@dtypes(*all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half))
def test_mul_scalar(self, layout, device, dtype, enable_hybrid):
for sparse in self.generate_simple_inputs(
layout, device=device, dtype=dtype, index_dtype=torch.int32, enable_hybrid=enable_hybrid):
for scalar_dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half):
# ComplexHalf is experimental
if dtype is torch.half and scalar_dtype.is_complex:
continue
scalar_t = torch.tensor(2, dtype=scalar_dtype)
for scalar in (scalar_t, scalar_t.item()):
res_out = sparse.mul(scalar)
self.assertEqual(res_out, scalar * sparse)
res_dense_out = sparse.to_dense().mul(scalar)
# BUG: dispatcher ignores mul.Scalar(Tensor, Scalar)
# This issue is circumvented in the mul(Tensor, Tensor) kernel.
self.assertEqual(res_out, res_dense_out)
if dtype == torch.result_type(sparse, scalar):
res_in_dense = sparse.to_dense().mul_(scalar)
res_in = sparse.clone().mul_(scalar)
self.assertEqual(res_in, res_in_dense)
self.assertEqual(res_out, res_in)
@skipCPUIfNoMklSparse
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sparse_add(self, device, dtype):
def run_test(m, n, index_dtype):
alpha = random.random()
nnz1 = random.randint(0, m * n)
nnz2 = random.randint(0, m * n)
nnz3 = random.randint(0, m * n)
if TEST_WITH_ROCM:
# ROCm fails when nnz = 0
nnz1, nnz2, nnz3 = max(1, nnz1), max(1, nnz2), max(1, nnz3)
S1 = self.genSparseCSRTensor([m, n], nnz1, dtype=dtype, device=device, index_dtype=index_dtype)
S2 = self.genSparseCSRTensor([m, n], nnz2, dtype=dtype, device=device, index_dtype=index_dtype)
S3 = self.genSparseCSRTensor([m, n], nnz3, dtype=dtype, device=device, index_dtype=index_dtype)
sparse_args = [S1, S2, S3]
dense_args = [t.to_dense() for t in sparse_args]
arg_idx = list(range(len(sparse_args)))
out_idx = arg_idx + [None]
for idx1, idx2, idx3 in itertools.product(arg_idx, arg_idx, out_idx):
s1 = sparse_args[idx1]
s2 = sparse_args[idx2]
s3 = None if idx3 is None else sparse_args[idx3]
d1 = dense_args[idx1]
d2 = dense_args[idx2]
d3 = None if idx3 is None else dense_args[idx3]
expected = torch.add(d1, d2, alpha=alpha, out=d3)
actual = torch.add(s1, s2, alpha=alpha, out=s3)
self.assertEqual(actual.crow_indices().dtype, index_dtype)
self.assertEqual(actual.col_indices().dtype, index_dtype)
self.assertEqual(actual, expected)
self.assertEqual(s3, d3)
if s3 is not None:
self.assertEqual(s3.crow_indices().dtype, index_dtype)
self.assertEqual(s3.col_indices().dtype, index_dtype)
for index_dtype in [torch.int32, torch.int64]:
for m, n in itertools.product([3, 5], [3, 5]):
run_test(m, n, index_dtype)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sparse_add_errors(self, device, dtype):
def run_test(index_type):
a = self.genSparseCSRTensor((2, 2), 3, dtype=dtype, device=device, index_dtype=index_type)
b = self.genSparseCSRTensor((2, 1), 2, dtype=dtype, device=device, index_dtype=index_type)
with self.assertRaisesRegex(RuntimeError, "Expected input tensors to have the same shape"):
torch.add(a, b)
for index_dtype in [torch.int32, torch.int64]:
run_test(index_dtype)
@skipCPUIfNoMklSparse
@skipCUDAIf(
not _check_cusparse_triangular_solve_available(),
"cuSparse Generic API SpSV is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_sparse_triangular_solve(self, device, dtype):
def run_test(n, k, upper, unitriangular, transpose, zero):
if not unitriangular:
triangle_function = torch.triu if upper else torch.tril
else:
# Make sure diagonal elements are not materialized.
# This exercises `unitriangular=True` without relying on the
# explicit presence of these indices.
if upper:
def remove_diagonal(t):
return t.triu(-1)
else:
def remove_diagonal(t):
return t.tril(-1)
triangle_function = remove_diagonal
make_A = torch.zeros if zero else make_tensor
A = make_A((n, n), dtype=dtype, device=device)
A = triangle_function(A)
A_sparse = A.to_sparse_csr()
B = make_tensor((n, k), dtype=dtype, device=device)
expected = torch.triangular_solve(B, A, upper=upper, unitriangular=unitriangular, transpose=transpose)
expected_X = expected.solution
actual = torch.triangular_solve(B, A_sparse, upper=upper, unitriangular=unitriangular, transpose=transpose)
actual_X = actual.solution
actual_A_clone = actual.cloned_coefficient
self.assertTrue(actual_A_clone.numel() == 0)
if A_sparse._nnz() == 0:
self.assertTrue(actual_X.isnan().all())
return
self.assertEqual(actual_X, expected_X)
# test out with C contiguous strides
out = torch.empty_strided((n, k), (k, 1), dtype=dtype, device=device)
torch.triangular_solve(
B, A_sparse,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, expected_X)
# test out with F contiguous strides
out = torch.empty_strided((n, k), (1, n), dtype=dtype, device=device)
torch.triangular_solve(
B, A_sparse,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, expected_X)
self.assertEqual(out.stride(), (1, n))
# test out with discontiguous strides
out = torch.empty_strided((2 * n, k), (1, 2 * n), dtype=dtype, device=device)[::2]
if n > 0 and k > 0:
self.assertFalse(out.is_contiguous())
self.assertFalse(out.t().is_contiguous())
before_stride = out.stride()
torch.triangular_solve(
B, A_sparse,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, expected_X)
self.assertEqual(out.stride(), before_stride)
ks = [0, 1, 3]
ns = [5, 3, 0]
for (k, n), (upper, unitriangular, transpose, zero) in itertools.product(itertools.product(ks, ns),
itertools.product([True, False], repeat=4)):
run_test(n, k, upper, unitriangular, transpose, zero)
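# --- Illustrative sketch (not part of the original test) ---
# A minimal example of the sparse triangular solve exercised above. It assumes an MKL-enabled
# CPU build (or cuSPARSE SpSV on CUDA); `A`, `B`, `X` are hypothetical names.
#   >>> import torch
#   >>> A = torch.triu(torch.rand(4, 4) + torch.eye(4)).to_sparse_csr()   # upper-triangular CSR
#   >>> B = torch.rand(4, 2)
#   >>> X = torch.triangular_solve(B, A, upper=True).solution
#   >>> torch.allclose(A.to_dense() @ X, B, atol=1e-5)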
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_sampled_addmm(self, device, dtype):
def run_test(c, a, b, op_a, op_b, *, alpha=None, beta=None):
if dtype.is_complex:
alpha = random.random() + 0.3j if alpha is None else alpha
beta = random.random() + 0.6j if beta is None else beta
else:
alpha = random.random() if alpha is None else alpha
beta = random.random() if beta is None else beta
if op_a and a.shape == b.shape:
a = a.mH
if op_b and a.shape == b.shape:
b = b.mH
actual = torch.sparse.sampled_addmm(c, a, b, alpha=alpha, beta=beta)
out = torch.sparse_csr_tensor(
*map(torch.clone, (actual.crow_indices(), actual.col_indices())),
torch.empty_like(actual.values()),
size=actual.shape
)
torch.sparse.sampled_addmm(c, a, b, alpha=alpha, beta=beta, out=out)
spy_c = torch.sparse_csr_tensor(c.crow_indices(), c.col_indices(), torch.ones_like(c.values()), size=c.shape)
expected = alpha * (a @ b) * spy_c.to_dense() + beta * c.to_dense()
self.assertEqual(actual.to_dense(), out.to_dense())
self.assertEqual(actual.to_dense(), expected)
mnk = list(itertools.product([2, 5], repeat=3))
# Add a test case for size 0 a and b tensors
mnk = mnk + [(5, 5, 0)]
batch_shapes = [(), (2,), (2, 3)]
tf = [True, False]
for index_dtype in [torch.int32, torch.int64]:
for (m, n, k), b, noncontiguous, bcast_c in itertools.product(mnk, batch_shapes, tf, tf):
if bcast_c and len(b) == 0:
continue
nnz = random.randint(0, m * n)
c_batch = () if bcast_c else b
c = self.genSparseCSRTensor((*c_batch, m, n), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a = make_tensor((*b, m, k), dtype=dtype, device=device, noncontiguous=noncontiguous)
b = make_tensor((*b, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
for op_a, op_b in itertools.product([True, False], repeat=2):
run_test(c, a, b, op_a, op_b)
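# --- Illustrative sketch (not part of the original test) ---
# The semantics checked above: sampled_addmm(c, a, b) == alpha * (a @ b) restricted to c's
# sparsity pattern + beta * c. Minimal example with hypothetical names; it assumes the CPU path
# or cuSPARSE SDDMM on CUDA.
#   >>> import torch
#   >>> c = torch.eye(3).to_sparse_csr()
#   >>> a, b = torch.rand(3, 4), torch.rand(4, 3)
#   >>> out = torch.sparse.sampled_addmm(c, a, b, alpha=2.0, beta=0.5)
#   >>> torch.allclose(out.to_dense(), 2.0 * (a @ b) * torch.eye(3) + 0.5 * torch.eye(3))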
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sampled_addmm_autograd(self, device, dtype):
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
samples = list(sample_inputs_sparse_sampled_addmm(None, device, dtype, requires_grad=True))
for sample, dense_covector in zip(samples, [True, False]):
c = sample.input
a = sample.args[0]
b = sample.args[1]
# Compute sparse result
output = torch.sparse.sampled_addmm(c, a, b, **sample.kwargs)
covector = torch.randn_like(output).to_dense() if dense_covector else torch.randn_like(output)
output.backward(covector)
# Compute dense result and compare with sparse result
c1, a1, b1 = (x.detach().to_dense().requires_grad_(True) for x in [c, a, b])
dense_output = sample.kwargs['alpha'] * (a1 @ b1) * torch.ones_like(c).to_dense() + sample.kwargs['beta'] * c1
self.assertEqual(output, dense_output)
dense_covector = covector.to_dense()
dense_output.backward(dense_covector)
self.assertEqual(c.grad, c1.grad)
self.assertEqual(a.grad, a1.grad)
self.assertEqual(b.grad, b1.grad)
@onlyCUDA
# It works on ROCm; the CUDA issue is currently active
@skipCUDAIf(not TEST_WITH_ROCM, "Causes CUDA memory exception, see https://github.com/pytorch/pytorch/issues/72177")
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_sampled_addmm_zero_sized(self, device, dtype):
def run_test(c, a, b):
actual = torch.sparse.sampled_addmm(c, a, b)
self.assertEqual(actual.shape, c.shape)
for m, n, k in itertools.product([0, 5], repeat=3):
c = torch.empty(m, n, dtype=dtype, device=device, layout=torch.sparse_csr)
a = make_tensor((m, k), dtype=dtype, device=device)
b = make_tensor((k, n), dtype=dtype, device=device)
run_test(c, a, b)
@onlyCUDA
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sampled_addmm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse sampled versions
# import re
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
a_sparse = a.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"cannot be multiplied"):
torch.sparse.sampled_addmm(a_sparse, a, a)
# mat1 must be a matrix
with self.assertRaisesRegex(RuntimeError, r"Expected mat1 to be a matrix"):
torch.sparse.sampled_addmm(a_sparse, a[..., 0, :], a)
# mat2 must be a matrix
with self.assertRaisesRegex(RuntimeError, r"Expected mat2 to be a matrix"):
torch.sparse.sampled_addmm(a_sparse, a, a[..., 0, :])
a = make_tensor((2, 2), dtype=dtype, device=device)
b = make_tensor((3, 3), dtype=dtype, device=device)
b_sparse = b.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"self.shape\[-2\] must match mat1.shape\[-2\]"):
torch.sparse.sampled_addmm(b_sparse, a, a)
b = make_tensor((2, 3), dtype=dtype, device=device)
b_sparse = b.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"self.shape\[-1\] must match mat2.shape\[-1\]"):
torch.sparse.sampled_addmm(b_sparse, a, a)
a = make_tensor((2, 2), dtype=dtype, device=device)
a_sparse = a.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"Expected mat1 to have strided layout"):
torch.sparse.sampled_addmm(a_sparse, a_sparse, a_sparse)
with self.assertRaisesRegex(RuntimeError, r"Expected mat2 to have strided layout"):
torch.sparse.sampled_addmm(a_sparse, a, a_sparse)
@onlyCPU
@dtypes(torch.float32, torch.float64, torch.bfloat16, torch.float16)
@precisionOverride({torch.bfloat16: 0.01})
def test_sparse_mm_reduce_sum(self, device, dtype):
def run_test(m, n, k, nnz, train):
sparse = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=torch.int64)
dense = sparse.to_dense()
mat = torch.randn(k, n, dtype=dtype)
ref_mat = mat.clone()
if train:
sparse.requires_grad_()
mat.requires_grad_()
dense.requires_grad_()
ref_mat.requires_grad_()
ref_out = torch.mm(dense, ref_mat)
out = torch.sparse.mm(sparse, mat, 'sum')
self.assertEqual(out, ref_out)
if train:
ref_out.sum().backward()
out.sum().backward()
grad_input = sparse.grad
ref_grad_input = dense.grad
grad_mat = mat.grad
ref_grad_mat = ref_mat.grad
self.assertEqual(grad_input.to_dense(), ref_grad_input)
self.assertEqual(grad_mat, ref_grad_mat)
run_test(4, 5, 4, 10, False)
run_test(4, 4, 4, 16, True)
@skipIfTorchDynamo()
@onlyCPU
@dtypes(torch.float32, torch.float64, torch.bfloat16, torch.float16)
@precisionOverride({torch.bfloat16: 0.01, torch.float16: 0.01})
def test_sparse_mm_reduce(self, device, dtype):
def run_test(m, n, k, nnz, reduce_type, index_dtype, train):
csr = self.genSparseCSRTensor((m, n), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
mat = torch.randn(n, k, dtype=dtype)
ref_mat = mat.clone()
ref_values = csr.values().clone()
out_int32 = index_dtype == torch.int32
coo_indices = torch._convert_indices_from_csr_to_coo(
csr.crow_indices(),
csr.col_indices(),
out_int32=out_int32)
row, col = coo_indices[0], coo_indices[1]
def ref(row, col, val, mat):
out = torch.zeros([m, k], dtype=dtype)
weight = mat.index_select(0, col)
src = weight.mul(val.view(-1, 1))
index = row.view(-1, 1).expand_as(weight)
index = index.to(dtype=torch.int64)
# scatter_reduce expects index to be int64
out.scatter_reduce_(0, index, src, reduce=reduce_type, include_self=False)
return out
if train:
csr.requires_grad_()
mat.requires_grad_()
ref_values.requires_grad_()
ref_mat.requires_grad_()
ref_out = ref(row, col, ref_values, ref_mat)
out = torch.sparse.mm(csr, mat, reduce_type)
self.assertEqual(out, ref_out)
if train and dtype not in (torch.bfloat16, torch.float16):
ref_out.sum().backward()
out.sum().backward()
grad_values = csr.grad.values()
grad_weight = mat.grad
ref_grad_values = ref_values.grad
ref_grad_weight = ref_mat.grad
self.assertEqual(grad_values, ref_grad_values)
self.assertEqual(grad_weight, ref_grad_weight)
for train in [False, True]:
for index_dtype in [torch.int32, torch.int64]:
for reduce_type in ["sum", "mean", "amax", "amin"]:
# by setting nnz < M, create empty rows
run_test(3, 4, 11, 1, reduce_type, index_dtype, train)
run_test(3, 4, 11, 6, reduce_type, index_dtype, train)
run_test(3, 4, 11, 12, reduce_type, index_dtype, train)
# we are doing blocking with 4x vector length in the kernel,
# so need to test when K > 4x vector length
run_test(4, 7, 33, 13, reduce_type, index_dtype, train)
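# --- Illustrative sketch (not part of the original test) ---
# The reduce overload exercised above: torch.sparse.mm(csr, dense, reduce) aggregates the
# per-row products with the given reduction; with "sum" it matches a plain matmul. Minimal
# CPU example with hypothetical names.
#   >>> import torch
#   >>> csr = torch.tensor([[1., 0.], [0., 2.]]).to_sparse_csr()
#   >>> mat = torch.rand(2, 3)
#   >>> torch.allclose(torch.sparse.mm(csr, mat, 'sum'), csr.to_dense() @ mat)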
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_coo_csr_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
coo_sparse = dense.to_sparse()
csr_sparse = coo_sparse.to_sparse_csr()
self.assertEqual(csr_sparse.to_dense(), dense)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_csr_coo_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
csr_sparse = dense.to_sparse_csr()
coo_sparse = csr_sparse.to_sparse()
self.assertEqual(coo_sparse.to_dense(), dense)
# Currently, there is no rule in PyTorch for filling zeros in the outputs
# from operations on Sparse CSR tensors. Hence only those operators are supported
# which have 0->0 correspondence, example: sin(0) = 0, tan(0) = 0 but
# cos(0) = 1 (and hence it's not supported).
# Note: here, we do this test only for unary operators
@ops(sparse_csr_unary_ufuncs)
def test_zero_to_zero_correspondence_unary(self, device, dtype, op):
zero = torch.zeros((1, 2), dtype=dtype, device=device)
tensor_explicit_zeros = torch.sparse_csr_tensor([0, 1], [1], [0], dtype=dtype, device=device)
output_zero = op(zero)
expected_zero = zero.to(output_zero.dtype)
output_explicit_zeros = op(tensor_explicit_zeros).to_dense()
expected_explicit_zeros = tensor_explicit_zeros.to_dense().to(output_explicit_zeros.dtype)
for (output, expected) in [
(output_zero, expected_zero),
(output_explicit_zeros, expected_explicit_zeros)
]:
self.assertEqual(output, expected, f"This operator ({op.name}) should not be supported for "
"Sparse CSR as it breaks 0->0 correspondence.")
for inp in [zero.to_sparse_csr(), tensor_explicit_zeros]:
self.assertEqual(op(inp).values().numel(), inp.values().numel(),
f"{op.name} fails to preserve sparsity pattern.")
@ops(sparse_csr_unary_ufuncs)
def test_sparse_csr_unary_out(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
if not op.supports_out:
self.skipTest("Skipped! Out not supported")
for sample in samples:
assert torch.is_tensor(sample.input)
# Sparse CSR only supports 2D tensors as inputs
# Fail early to prevent silent success with this test
if sample.input.ndim != 2:
raise ValueError("Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
sample.input = sample.input.to_sparse_csr()
expect = op(sample.input, *sample.args, **sample.kwargs)
out = self.genSparseCSRTensor(sample.input.size(), sample.input._nnz(),
device=sample.input.device, dtype=expect.dtype,
index_dtype=sample.input.crow_indices().dtype)
op(sample.input, *sample.args, **sample.kwargs, out=out)
self.assertEqual(out, expect)
@ops(sparse_csr_unary_ufuncs)
def test_sparse_csr_unary_inplace(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
if op.inplace_variant is None:
self.skipTest("Skipped! Inplace variant not supported!")
for sample in samples:
assert torch.is_tensor(sample.input)
# Sparse CSR only supports 2D tensors as inputs
# Fail early to prevent silent success with this test
if sample.input.ndim != 2:
raise ValueError("Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
sample.input = sample.input.to_sparse_csr()
expect = op(sample.input, *sample.args, **sample.kwargs)
if not torch.can_cast(expect.dtype, dtype):
with self.assertRaisesRegex(RuntimeError, "result type"):
op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
continue
if sample.input.is_complex() and op.name == "abs":
with self.assertRaisesRegex(RuntimeError, "not supported"):
op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
continue
actual = op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
self.assertIs(actual, sample.input)
self.assertEqual(actual, expect)
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@ops(sparse_csr_unary_ufuncs, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, torch.cdouble])
def test_autograd_sparse_csr_unary(self, device, dtype, op):
if op.name not in UNARY_EWISE_CSR_ALLOW_AUTOGRAD:
self.skipTest(f"Skipped! Unary op {op.name} not supported with CSR input and autograd")
samples = list(op.sample_inputs(device, dtype))
# Fail early to prevent silent success with this test
ndims_equals_2d = (s.input.ndim == 2 for s in samples)
if not any(ndims_equals_2d):
raise ValueError("Expected at least one 2D tensor in samples.")
for sample in samples:
# We must skip samples of low dimensionality; we can't convert them to sparse compressed layouts
if sample.input.ndim < 2:
continue
sparse_input = sample.input.to_sparse_csr().requires_grad_(True)
def fn(input):
output = op.gradcheck_wrapper(op.get_op(), input, *sample.args, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
# Compute sparse result
output = fn(sparse_input)
covector = torch.randn_like(output)
output.backward(covector)
self.assertTrue(torch.is_tensor(sparse_input.grad))
self.assertTrue(sparse_input.grad.is_sparse_csr)
# Compute dense result and compare with sparse result
dense_input = sparse_input.detach().to_dense().requires_grad_(True)
dense_output = fn(dense_input)
dense_covector = covector.to_dense()
dense_output.backward(dense_covector)
self.assertEqual(sparse_input.grad, dense_input.grad)
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float64)
def test_autograd_dense_output_addmm(self, device, dtype):
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
samples = list(sample_inputs_addmm(None, device, dtype, requires_grad=True))
# Fail early to prevent silent success with this test
ndims_equals_2d = (s.args[0].ndim == 2 for s in samples)
if not any(ndims_equals_2d):
raise ValueError("Expected at least one 2D tensor in samples to convert to sparse.")
for sample in samples:
a = sample.args[0].relu().to_sparse_csr()
if sample.args[0].shape == sample.args[1].shape:
import warnings
warnings.warn("Broken for square matrices, see https://github.com/pytorch/pytorch/issues/116565")
continue
# This path tests the autograd path wrt dense inputs
for addmm in [torch.addmm, torch.sparse.addmm]:
def fn(c, b):
output = addmm(c, a, b, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
self.assertTrue(torch.autograd.gradcheck(fn, [sample.input, sample.args[1]], fast_mode=True))
# noncontiguous
c = make_tensor(sample.input.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
b = make_tensor(sample.args[1].shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
self.assertTrue(torch.autograd.gradcheck(fn, [c, b], fast_mode=True))
# Now test the autograd path wrt sparse inputs
for reverse in [True, False]:
c, b = sample.input, sample.args[1]
if reverse and a.shape != b.shape:
continue
def fn(a):
inputs = (c, b, a) if reverse else (c, a, b)
output = addmm(*inputs, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
# gradcheck doesn't work for sparse CSR yet, compare against dense path
# Compute sparse result
a = a.detach().requires_grad_(True)
output = fn(a)
covector = torch.randn_like(output)
output.backward(covector)
self.assertTrue(torch.is_tensor(a.grad))
if addmm == torch.sparse.addmm:
self.assertTrue(a.grad.is_sparse_csr)
else:
self.assertTrue(a.grad.layout == torch.strided)
# Compute dense result and compare with sparse result
dense_a = a.detach().to_dense().requires_grad_(True)
dense_output = fn(dense_a)
self.assertEqual(output, dense_output)
dense_covector = covector.to_dense()
dense_output.backward(dense_covector)
if addmm == torch.sparse.addmm:
self.assertEqual(a.grad, dense_a.grad.sparse_mask(a))
else:
self.assertEqual(a.grad, dense_a.grad)
@skipCPUIfNoMklSparse
@dtypes(torch.float64)
def test_autograd_dense_output_addmv(self, device, dtype):
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
samples = list(sample_inputs_addmv(None, device, dtype, requires_grad=True))
# Fail early to prevent silent success with this test
ndims_equals_2d = (s.args[0].ndim == 2 for s in samples)
if not any(ndims_equals_2d):
raise ValueError("Expected at least one 2D tensor in samples to convert to sparse.")
for sample in samples:
# TODO: Remove detach once we have autograd support for CSR input
a = sample.args[0].to_sparse_csr().detach()
def fn(c, b):
output = torch.addmv(c, a, b, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
self.assertTrue(torch.autograd.gradcheck(fn, [sample.input, sample.args[1]], fast_mode=True))
# noncontiguous
c = make_tensor(sample.input.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
b = make_tensor(sample.args[1].shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
self.assertTrue(torch.autograd.gradcheck(fn, [c, b], fast_mode=True))
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@ops(binary_ops_with_dense_output, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, ])
def test_autograd_dense_output(self, device, dtype, op):
if op.name == "mv" and no_mkl_sparse and self.device_type == 'cpu':
self.skipTest("MKL Sparse is not available")
samples = list(op.sample_inputs(device, dtype, requires_grad=True))
# Fail early to prevent silent success with this test
ndims_equals_2d = (s.input.ndim == 2 for s in samples)
if not any(ndims_equals_2d):
raise ValueError("Expected at least one 2D tensor in samples.")
# Here we assume that the signature is op(sparse_input, dense_input) -> dense_output
for sample in samples:
# TODO: Remove detach once we have autograd support for CSR input
sparse_input = sample.input.to_sparse_csr().detach()
def fn(*args):
output = op.gradcheck_wrapper(op.get_op(), sparse_input, *args, **sample.kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
self.assertTrue(torch.autograd.gradcheck(fn, sample.args, fast_mode=True))
# noncontiguous
args = [make_tensor(a.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True) for a in sample.args]
self.assertTrue(torch.autograd.gradcheck(fn, args, fast_mode=True))
@dtypes(*all_types_and_complex())
def test_direct_coo_csr_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
coo_sparse = dense.to_sparse_coo()
self.assertEqual(coo_sparse.to_sparse_csr().to_sparse_coo(), coo_sparse)
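# --- Illustrative sketch (not part of the original test) ---
# The direct COO <-> CSR round trip checked above, with hypothetical names.
#   >>> import torch
#   >>> coo = torch.tensor([[0., 1.], [2., 0.]]).to_sparse_coo()
#   >>> csr = coo.to_sparse_csr()
#   >>> torch.equal(csr.to_sparse_coo().to_dense(), coo.to_dense())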
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sum(self, device, dtype):
def run_test(shape, nnz, index_type):
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_type)
self.assertEqual(a.sum(), a.values().sum())
if dtype in floating_types():
a.requires_grad_(True)
a.sum().backward()
self.assertEqual(a.grad, torch.ones(shape, dtype=dtype, device=device))
for shape, index_dtype in itertools.product(
[(10, 5), (10, 10)],
[torch.int32, torch.int64]):
run_test(shape, 0, index_dtype)
run_test(shape, max(shape), index_dtype)
run_test(shape, shape[0] * shape[1], index_dtype)
@skipIfTorchDynamo()
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@all_sparse_compressed_layouts()
def test_transpose(self, device, dtype, layout):
def _check_transpose_view(subject, transpose):
self.assertTrue(transpose.values()._is_view())
self.assertTrue(transpose._is_view())
self.assertTrue(transpose._base is subject)
def _check_layout_invariants(transpose):
self.assertEqual(transpose.device, torch.device(device))
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[transpose.layout]
compressed_indices, plain_indices = compressed_indices_mth(transpose), plain_indices_mth(transpose)
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, transpose.values(),
transpose.shape, transpose.layout)
def check_good_transpose(subject, subject_dense, dim0, dim1, expected_layout):
transpose = subject.transpose(dim0, dim1)
# correct layout
self.assertEqual(transpose.layout, expected_layout)
# transpose must return a view
_check_transpose_view(subject, transpose)
# result uses unsafe construction, so we check invariants
_check_layout_invariants(transpose)
self.assertEqual(transpose.to_dense(), subject_dense.transpose(dim0, dim1))
round_trip = transpose.transpose(dim0, dim1)
self.assertEqual(round_trip.layout, subject.layout)
# transpose must return a view
_check_transpose_view(subject, round_trip)
# result uses unsafe construction, so we check invariants
_check_layout_invariants(round_trip)
self.assertEqual(round_trip.to_dense(), subject_dense)
def check_same_dim_transpose(subject, subject_dense, dim):
transpose = subject.transpose(dim, dim)
# correct layout
self.assertEqual(transpose.layout, subject.layout)
# transpose must return a view
_check_transpose_view(subject, transpose)
# result uses unsafe construction, so we check invariants
_check_layout_invariants(transpose)
self.assertEqual(transpose.to_dense(), subject_dense)
def check_dim_type_mismatch_throws(subject, name0, dim0, name1, dim1):
mismatch_name = f"{dim0}\\({name0}\\) and {dim1}\\({name1}\\)"
err = r"transpose\(\): can only transpose dimensions of the same type \(Batch, Sparse, Dense\), got " + mismatch_name
with self.assertRaisesRegex(RuntimeError, err):
subject.transpose(dim0, dim1)
def run_test(shape, nnz, index_type, n_dense, blocksize=()):
subject = self.genSparseCompressedTensor(shape,
nnz,
layout=layout,
device=device,
index_dtype=index_type,
blocksize=blocksize,
dense_dims=n_dense,
dtype=dtype)
sparse0 = len(shape) - n_dense - 1
sparse1 = sparse0 - 1
dense0 = sparse0 + 1 if n_dense > 0 else None
dense1 = dense0 + 1 if n_dense > 1 else None
n_batch = len(shape) - n_dense - 2
batch0 = sparse1 - 1 if n_batch > 0 else None
batch1 = 0 if n_batch > 1 else None
sparse_dims = (sparse0, sparse1)
dense_dims = (dense0, dense1)
batch_dims = (batch0, batch1)
named0 = [(name, d[0]) for name, d in zip(["Batch", "Sparse", "Dense"], (batch_dims, sparse_dims, dense_dims))]
named1 = [(name, d[1]) for name, d in zip(["Batch", "Sparse", "Dense"], (batch_dims, sparse_dims, dense_dims))]
flipped_layout = {
torch.sparse_csr: torch.sparse_csc,
torch.sparse_csc: torch.sparse_csr,
torch.sparse_bsr: torch.sparse_bsc,
torch.sparse_bsc: torch.sparse_bsr
}[layout]
if n_dense > 0:
# expect all transpose to throw
for (name0, dim0), (name1, dim1) in itertools.product(named0, named1):
msg = r"transpose\(\): hybrid sparse compressed tensors with dense dimensions are not supported"
if (dim0 is not None) and (dim1 is not None):
with self.assertRaisesRegex(RuntimeError, msg):
subject.transpose(dim0, dim1)
else:
subject_dense = subject.to_dense()
for (name0, dim0), (name1, dim1) in itertools.product(named0, named1):
if dim0 is not None:
check_same_dim_transpose(subject, subject_dense, dim0)
if dim1 is not None:
if name0 == name1:
expected_layout = flipped_layout if name0 == "Sparse" else layout
check_good_transpose(subject, subject_dense, dim0, dim1, expected_layout)
else:
check_dim_type_mismatch_throws(subject, name0, dim0, name1, dim1)
# batch/sparse, sparse/dense only and full hybrid cases
shape_ndense = list(itertools.product([(2, 4, 6, 2), (10, 6, 4, 2), (2, 4, 4, 2, 6)], [0, 1, 2]))
# sparse only cases
shape_ndense += [[(4, 8), 0], [(2, 2), 0], [(8, 4), 0]]
for (shape, n_dense), index_dtype in itertools.product(shape_ndense, [torch.int32, torch.int64]):
n_batch = len(shape) - n_dense - 2
sparse_shape = shape[n_batch: n_batch + 2]
if layout in (torch.sparse_bsr, torch.sparse_bsc):
# for blocked all combinations of 2,1 should be valid blocksizes
run_test(shape, 0, index_dtype, n_dense, blocksize=(2, 2))
run_test(shape, max(sparse_shape), index_dtype, n_dense, blocksize=(2, 2))
run_test(shape, sparse_shape[0] * sparse_shape[1], index_dtype, n_dense, blocksize=(2, 2))
# repeat the realistic sparsity case with varied block sizes
run_test(shape, max(sparse_shape), index_dtype, n_dense, blocksize=(2, 1))
run_test(shape, max(sparse_shape), index_dtype, n_dense, blocksize=(1, 2))
run_test(shape, max(sparse_shape), index_dtype, n_dense, blocksize=(1, 1))
else:
run_test(shape, 0, index_dtype, n_dense)
run_test(shape, max(sparse_shape), index_dtype, n_dense)
run_test(shape, sparse_shape[0] * sparse_shape[1], index_dtype, n_dense)
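# --- Illustrative sketch (not part of the original test) ---
# Transposing the two sparse dimensions flips the compressed layout (CSR <-> CSC, BSR <-> BSC)
# and returns a view, as checked above. Minimal example with hypothetical names.
#   >>> import torch
#   >>> csr = torch.tensor([[0., 1.], [2., 0.]]).to_sparse_csr()
#   >>> t = csr.transpose(0, 1)
#   >>> t.layout                                        # torch.sparse_csc
#   >>> torch.equal(t.to_dense(), csr.to_dense().t())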
# TODO: This is a stopgap for a rigorous extension of our autograd tests
# to test the functionality of detach
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_exercise_detach(self, device, dtype):
shape = (3, 3)
nnz = 4
for index_dtype in [torch.int32, torch.int64]:
inp = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
detached_inp = inp.detach()
self.assertEqual(inp, detached_inp)
def _construct_sp_matrix(self, tensor, layout, blocksize=(2, 2)):
if tensor.layout in [torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.strided]:
tensor = tensor.to_dense()
else:
raise NotImplementedError(repr(tensor))
if layout is torch.sparse_csr:
return sp.csr_matrix(tensor.cpu().numpy())
if layout is torch.sparse_csc:
return sp.csc_matrix(tensor.cpu().numpy())
if layout is torch.sparse_bsr:
return sp.bsr_matrix(tensor.cpu().numpy(), blocksize=blocksize).sorted_indices()
if layout is torch.sparse_bsc:
# SciPy doesn't have native BSC support - but our tests don't need the full
# functionality so fake it by using a transposed BSR matrix.
class FakeBscMatrix:
def __init__(self, matrix):
self._matrix = matrix
self.shape = tuple(reversed(matrix.shape))
self.indptr = matrix.indptr
self.indices = matrix.indices
self.data = [x.transpose() for x in matrix.data]
@staticmethod
def from_matrix(matrix, blocksize):
blocksize = tuple(reversed(blocksize))
matrix = matrix.transpose()
return FakeBscMatrix(sp.bsr_matrix(matrix, blocksize=blocksize))
def sorted_indices(self):
sub = self._matrix.sorted_indices()
return FakeBscMatrix(sub)
return FakeBscMatrix.from_matrix(tensor.cpu().numpy(), blocksize=blocksize).sorted_indices()
raise NotImplementedError(repr(tensor))
@skipMeta
@all_sparse_compressed_layouts('to_layout')
@all_sparse_compressed_layouts('from_layout')
def test_compressed_layout_conversions_coverage(self, device, from_layout, to_layout):
"""This test performs a smoke test for covered conversion and verifies
that an exception is thrown for unsupported conversions.
TODO: This test covers a subset of
TestSparseAny.test_to_sparse tests and can be
eliminated. Keeping the test until the new
`Tensor.to_sparse(*, layout, blocksize)` has landed.
"""
allowed_pairwise_layouts_sets = {
frozenset({torch.sparse_csc}),
frozenset({torch.sparse_csr}),
frozenset({torch.sparse_csc, torch.sparse_csr}),
frozenset({torch.sparse_csc, torch.sparse_bsc}),
frozenset({torch.sparse_csc, torch.sparse_bsr}),
frozenset({torch.sparse_csr, torch.sparse_bsc}),
frozenset({torch.sparse_csr, torch.sparse_bsr}),
frozenset({torch.sparse_bsc}),
frozenset({torch.sparse_bsr}),
frozenset({torch.sparse_bsc, torch.sparse_bsr}),
}
block_layouts = (torch.sparse_bsr, torch.sparse_bsc)
def _to_from_layout(layout_a, layout_b, a):
expect_error = True
if {layout_a, layout_b} in allowed_pairwise_layouts_sets:
expect_error = False
# BSR -> CSR is not yet supported
if (layout_a, layout_b) == (torch.sparse_bsr, torch.sparse_csr):
expect_error = True
# BSR -> CSC is not yet supported
if (layout_a, layout_b) == (torch.sparse_bsr, torch.sparse_csc):
expect_error = True
# BSC -> CSR is not yet supported
if (layout_a, layout_b) == (torch.sparse_bsc, torch.sparse_csr):
expect_error = True
# BSC -> CSC is not yet supported
if (layout_a, layout_b) == (torch.sparse_bsc, torch.sparse_csc):
expect_error = True
# CSR -> BSR only works for non-batched inputs
if (layout_a, layout_b) == (torch.sparse_csr, torch.sparse_bsr):
if a.dim() > 2:
expect_error = True
# CSR -> BSC only works for non-batched inputs
if (layout_a, layout_b) == (torch.sparse_csr, torch.sparse_bsc):
if a.dim() > 2:
expect_error = True
# CSC -> BSR only works for non-batched inputs
if (layout_a, layout_b) == (torch.sparse_csc, torch.sparse_bsr):
if a.dim() > 2:
expect_error = True
# CSC -> BSC only works for non-batched inputs
if (layout_a, layout_b) == (torch.sparse_csc, torch.sparse_bsc):
if a.dim() > 2:
expect_error = True
blocksize_a = (1, 1) if layout_a in {torch.sparse_bsr, torch.sparse_bsc} else None
blocksize_b = (1, 1) if layout_b in {torch.sparse_bsr, torch.sparse_bsc} else None
b = a.to_sparse(layout=layout_a, blocksize=blocksize_a)
if expect_error:
with self.assertRaises(RuntimeError):
b.to_sparse(layout=layout_b, blocksize=blocksize_b)
else:
c = b.to_sparse(layout=layout_b, blocksize=blocksize_b)
self.assertEqual(a.to_dense(), c.to_dense())
# change of blocksize upon conversion is not yet supported.
if b.layout in block_layouts:
for block_layout in block_layouts:
with self.assertRaisesRegex(RuntimeError,
"conversion from.*to.*with blocksize changed from.*to.*is not supported"):
b.to_sparse(layout=block_layout, blocksize=(3, 3))
batch_dims = [(), (2,), (2, 2), (2, 2, 2)]
sparse_dims = (6, 12)
for batch_dim in batch_dims:
a = make_tensor(batch_dim + sparse_dims, dtype=torch.float, device=device)
_to_from_layout(from_layout, to_layout, a)
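# --- Illustrative sketch (not part of the original test) ---
# The Tensor.to_sparse(layout=..., blocksize=...) entry point exercised above, converting a
# strided tensor to a blocked compressed layout. Minimal example with hypothetical names;
# the blocksize must evenly divide the sparse dimensions.
#   >>> import torch
#   >>> dense = torch.rand(4, 6)
#   >>> bsr = dense.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 2))
#   >>> bsr.values().shape                              # (number of nonzero blocks, 2, 2)
#   >>> torch.equal(bsr.to_dense(), dense)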
@skipMeta
@all_sparse_compressed_layouts()
@batched_nonbatched()
@hybrid_nonhybrid()
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_dense_to_from_sparse_compressed(self, device, hybrid, batched, layout):
"""This test tests conversion from dense to/from CSR and CSC
by comparing to SciPy's implementation.
Here we test only those conversion combinations that SciPy
supports to ensure that PyTorch conversions are in the same
page with SciPy. Independent from SciPy, all conversion
combinations are tested in TestSparseAny.test_to_sparse.
"""
blocked_layouts = (torch.sparse_bsr, torch.sparse_bsc)
# helpers
def _check_against_scipy_matrix(pt_matrix, dense, blocksize, **kwargs):
# scipy has no bsc layout, so we check against the bsr layout of the transposed dense
if layout == torch.sparse_bsc:
sp_matrix = self._construct_sp_matrix(dense.t(), layout=torch.sparse_bsr, blocksize=blocksize[::-1])
else:
sp_matrix = self._construct_sp_matrix(dense, layout=layout, blocksize=blocksize)
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
self.assertEqual(layout, pt_matrix.layout)
if layout == torch.sparse_bsc:
self.assertEqual(sp_matrix.shape[::-1], pt_matrix.shape)
else:
self.assertEqual(sp_matrix.shape, pt_matrix.shape)
self.assertEqual(torch.tensor(sp_matrix.indptr, dtype=torch.int64), compressed_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.indices, dtype=torch.int64), plain_indices_mth(pt_matrix))
if layout == torch.sparse_bsc:
# we must transpose the blocks before comparing
self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values().transpose(-2, -1))
else:
self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values())
def _check_hybrid_matrix(pt_matrix, dense, blocksize, **kwargs):
# Calculate COO indices for sparse matrix.
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
compressed_indices = compressed_indices_mth(pt_matrix)
plain_indices = plain_indices_mth(pt_matrix)
coo_indices = torch._convert_indices_from_csr_to_coo(compressed_indices, plain_indices)
row_indices, col_indices = {
torch.sparse_csr: (coo_indices[0, ], coo_indices[1, ]),
torch.sparse_csc: (coo_indices[1, ], coo_indices[0, ]),
torch.sparse_bsr: (coo_indices[0, ], coo_indices[1, ]),
torch.sparse_bsc: (coo_indices[1, ], coo_indices[0, ]),
}[pt_matrix.layout]
# If sparse matrix layout blocked, rearrange dense matrix
# so that the shape past first two dimensions match the
# shape of sparse matrix values.
dense_to_check = dense
if blocksize:
dense_shape = dense.shape
dense_to_check_shape = (dense.shape[0] // blocksize[0],
blocksize[0],
dense.shape[1] // blocksize[1],
blocksize[1]) + dense.shape[2:]
dense_to_check = dense_to_check.reshape(dense_to_check_shape).transpose(1, 2)
# Verify that non-zero values of the sparse matrix are
# equal to corresponding values of the dense matrix.
self.assertEqual(pt_matrix.values(), dense_to_check[row_indices, col_indices])
# Verify that the remaining elements of the dense matrix
# are 0, i.e. that the dense and sparse matrices are fully
# equal.
mask = torch.ones_like(dense_to_check, dtype=torch.bool)
mask[row_indices, col_indices] = False
self.assertTrue(torch.all(torch.masked_select(dense_to_check, mask) == 0))
def _check_batched(pt_tensor, dense, check_batch=None, batch_shape=(), blocksize=(), **kwargs):
self.assertEqual(layout, pt_tensor.layout)
self.assertEqual(pt_tensor.shape, dense.shape)
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
for batch_index in np.ndindex(batch_shape):
pt_matrix = pt_tensor[batch_index]
dense_matrix = dense[batch_index]
dense_dim = pt_matrix.dim() - 2
dense_matrix_pt = dense_matrix.to_sparse(layout=layout,
blocksize=blocksize or None,
dense_dim=dense_dim)
# sanity check, selecting batch of to_<layout> and dense[batch].to_<layout> should give the same result
self.assertEqual(pt_matrix, dense_matrix_pt)
check_batch(pt_matrix, dense_matrix, blocksize, **kwargs)
def _generate_subject(sparse_shape, batch_shape, hybrid_shape):
shape = batch_shape + sparse_shape + hybrid_shape
n_batch_dim = len(batch_shape)
n_hybrid_dim = len(hybrid_shape)
# generate a dense tensor
dense = make_tensor(shape, dtype=torch.float, device=device)
# introduce some sparsity; the mask has the sparse shape, each element applies to an entire dense sub-tensor (hybrid) and is
# applied to each batch
mask = make_tensor(sparse_shape, dtype=torch.bool, device=device)
# manually expand to match hybrid shape
if hybrid:
mask = mask.view(sparse_shape + tuple(1 for _ in range(n_hybrid_dim)))
mask = mask.expand(sparse_shape + hybrid_shape)
# mask will broadcast over the batch dims if present
return dense * mask
# note: order is important here, the hybrid-ness decides the inner content check which is used to build the
# batched checker (if needed)
check_content = _check_against_scipy_matrix
if hybrid:
check_content = _check_hybrid_matrix
if batched:
check_content = functools.partial(_check_batched, check_batch=check_content)
sparse_sizes = [(6, 10), (0, 10), (6, 0), (0, 0)]
blocksizes = [(2, 2), (1, 1), (1, 2)] if layout in blocked_layouts else [()]
batch_sizes = [(3,), (1, 3), (2, 1, 3)] if batched else [()]
hybrid_sizes = [(4, ), (2, 2)] if hybrid else [()]
# general cases, always run
for sparse_shape, blocksize, batch_shape, hybrid_shape in itertools.product(
sparse_sizes, blocksizes, batch_sizes, hybrid_sizes):
dense = _generate_subject(sparse_shape, batch_shape, hybrid_shape)
sparse = dense.to_sparse(layout=layout, blocksize=blocksize or None, dense_dim=len(hybrid_shape))
check_content(sparse, dense, blocksize=blocksize, batch_shape=batch_shape, hybrid_shape=hybrid_shape)
dense_back = sparse.to_dense()
self.assertEqual(dense, dense_back)
# special cases for batched tensors
if batched:
# batched sparse tensors need only have the same number of non-zeros in each batch, not necessarily the
# same sparsity pattern in each batch
sparse_shape = sparse_sizes[0]
hybrid_shape = hybrid_sizes[0]
batch_shape = batch_sizes[0]
shape = batch_shape + sparse_shape + hybrid_shape
dense = make_tensor(shape, dtype=torch.float, device=device)
blocksize = blocksizes[0]
# number of elements/blocks in each batch (total not nnz)
batch_mask_shape = sparse_shape
if layout in blocked_layouts:
# if we are blocked, the mask is generated for the block-valued elements
batch_mask_shape = sparse_shape[0] // blocksize[0], sparse_shape[1] // blocksize[1]
# random bool vector w/ length equal to max possible nnz for the sparse_shape
mask_source = make_tensor(batch_mask_shape, dtype=torch.bool, device=device).flatten()
n_batch = functools.reduce(operator.mul, batch_shape, 1)
# stack random permutations of the source for each batch
mask = torch.stack([mask_source[torch.randperm(mask_source.numel())]
for _ in range(n_batch)], dim=0).reshape(batch_shape + batch_mask_shape)
if layout in blocked_layouts:
# for blocked we need to do a bit of extra work to expand the mask from blocked-space to element-space
mask_shape = mask.shape
mask = mask.view(mask_shape + (1, 1))
mask = mask.expand(mask_shape + blocksize)
mask = mask.transpose(-3, -2)
mask = mask.flatten(-4, -3).flatten(-2, -1)
mask_shape = mask.shape
mask = mask.view(mask_shape + (1,) * len(hybrid_shape))
mask = mask.expand(mask_shape + hybrid_shape)
dense = dense * mask
sparse = dense.to_sparse(layout=layout, blocksize=blocksize or None, dense_dim=len(hybrid_shape))
check_content(sparse, dense, blocksize=blocksize, batch_shape=batch_shape, hybrid_shape=hybrid_shape)
dense_back = sparse.to_dense()
self.assertEqual(dense, dense_back)
# if batches have different nnz we expect the conversion to throw
mask_0 = mask[0]
mask_1 = mask[0].clone().fill_(True)
mask_2 = mask[0].clone().fill_(False)
mask_true = mask_source.clone().fill_(True)
mask_false = mask_source.clone().fill_(False)
mask = torch.stack([(mask_0, mask_1, mask_2)[i % 3] for i in range(n_batch)], dim=0).reshape(batch_shape + mask_0.shape)
dense = make_tensor(shape, dtype=torch.float, device=device)
dense = dense * mask
msg = "Expect the same number of specified elements per batch."
with self.assertRaisesRegex(RuntimeError, msg):
dense.to_sparse(layout=layout, blocksize=blocksize or None)
# Should throw if there is a zero in the batch size
dense = make_tensor((0,) + shape, dtype=torch.float, device=device)
layout_code = str(layout).split("_")[-1]
msg = f"to_sparse_{layout_code}: Expected product of batch dimensions to be non-zero."
with self.assertRaisesRegex(RuntimeError, msg):
dense.to_sparse(layout=layout, blocksize=blocksize or None)
@skipMeta
@all_sparse_compressed_layouts()
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_sparse_to_sparse_compressed(self, device, dtype, coalesced, layout):
"""
This test checks conversion from COO to CSR and CSC, and from CSC to CSR and CSC,
by comparing to SciPy's implementation.
Here we test only those conversion combinations that SciPy
supports, to ensure that PyTorch conversions stay on the same
page as SciPy. Independently of SciPy, all conversion
combinations are tested in TestSparseAny.test_to_sparse.
"""
blocksize_kw = {}
if layout in (torch.sparse_bsc, torch.sparse_bsr):
blocksize_kw['blocksize'] = (2, 2)
# block modes don't support 0 width/height
shapes = [(6, 10)]
elif layout in (torch.sparse_csc, torch.sparse_csr):
shapes = [(0, 10), (6, 0), (6, 10), (0, 0)]
else:
raise NotImplementedError("unhandled layout")
if layout in (torch.sparse_bsc, torch.sparse_csc):
compressed_indices_mth = torch.Tensor.ccol_indices
plain_indices_mth = torch.Tensor.row_indices
elif layout in (torch.sparse_bsr, torch.sparse_csr):
compressed_indices_mth = torch.Tensor.crow_indices
plain_indices_mth = torch.Tensor.col_indices
else:
raise NotImplementedError("unhandled layout")
for shape in shapes:
sparse_dim = 2
nnz = shape[0] * shape[1] // 2
sparse, _, _ = self.genSparseTensor(shape, sparse_dim, nnz, coalesced, device, dtype)
sp_matrix = self._construct_sp_matrix(sparse, layout)
pt_matrix = sparse.to_sparse(layout=layout, **blocksize_kw)
self.assertEqual(layout, pt_matrix.layout)
self.assertEqual(sp_matrix.shape, pt_matrix.shape)
self.assertEqual(torch.tensor(sp_matrix.indptr, dtype=torch.int64), compressed_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.indices, dtype=torch.int64), plain_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values())
sparse_csc = sparse.to_sparse_csc()
sp_matrix = self._construct_sp_matrix(sparse_csc, layout)
pt_matrix = sparse_csc.to_sparse(layout=layout, **blocksize_kw)
self.assertEqual(layout, pt_matrix.layout)
self.assertEqual(sp_matrix.shape, pt_matrix.shape)
self.assertEqual(torch.tensor(sp_matrix.indptr, dtype=torch.int64), compressed_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.indices, dtype=torch.int64), plain_indices_mth(pt_matrix))
self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values())
@unittest.skipIf(not TEST_CUDA_CUDSS, "The test requires cudss")
@dtypes(*floating_types())
def test_linalg_solve_sparse_csr_cusolver(self, device, dtype):
# https://github.com/krshrimali/pytorch/blob/f5ee21dd87a7c5e67ba03bfd77ea22246cabdf0b/test/test_sparse_csr.py
try:
spd = torch.rand(4, 3)
A = spd.T @ spd
b = torch.rand(3).cuda()
A = A.to_sparse_csr().cuda()
x = torch.sparse.spsolve(A, b)
except RuntimeError as e:
if "Calling linear solver with sparse tensors requires compiling " in str(e):
self.skipTest("PyTorch was not built with cuDSS support")
samples = sample_inputs_linalg_solve(None, device, dtype)
for sample in samples:
if sample.input.ndim != 2:
continue
out = torch.zeros(sample.args[0].size(), dtype=dtype, device=device)
if sample.args[0].ndim != 1 and sample.args[0].size(-1) != 1:
with self.assertRaisesRegex(RuntimeError, "b must be a 1D tensor"):
out = torch.linalg.solve(sample.input.to_sparse_csr(), *sample.args, **sample.kwargs)
break
if not sample.args[0].numel():
with self.assertRaisesRegex(RuntimeError,
"Expected non-empty other tensor, but found empty tensor"):
torch.linalg.solve(sample.input.to_sparse_csr(), *sample.args, **sample.kwargs, out=out)
break
expect = torch.linalg.solve(sample.input, *sample.args, **sample.kwargs)
sample.input = sample.input.to_sparse_csr()
if sample.args[0].ndim != 1 and sample.args[0].size(-1) == 1:
expect = expect.squeeze(-1)
sample.args = (sample.args[0].squeeze(-1), )
out = torch.linalg.solve(sample.input, *sample.args, **sample.kwargs)
self.assertEqual(expect, out)
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_csr.py
|
test_csr_nnz
|
def test_csr_nnz(self):
# Tests the limits of the number of specified elements in CSR tensors, see gh-102520.
for nnz in [0, 2**31]:
rows, cols = 1, max(nnz, 1)
crow_indices = torch.tensor([0, nnz], dtype=torch.int64)
col_indices = torch.arange(nnz, dtype=torch.int64)
values = torch.ones(nnz, dtype=torch.int8)
a = torch.sparse_csr_tensor(crow_indices, col_indices, values, (rows, cols))
self.assertEqual(a._nnz(), nnz)
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
class TestSparseCSR(TestCase):
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
test_unsupported_dim
|
def test_unsupported_dim(self, device, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
A = torch.rand(128, 128, 128, device=device, dtype=torch.float16)
with self.assertRaisesRegex(RuntimeError, "Error original_tensor.dim"):
A_sparse = to_sparse_semi_structured(A)
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
class TestSparseSemiStructured(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
setUp
|
def setUp(self):
if len(SEMI_STRUCTURED_SUPPORTED_BACKENDS) == 0:
self.skipTest('semi-structured sparsity has no available backend!')
super().setUp()
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
setUp
|
def setUp(self):
if len(SEMI_STRUCTURED_SUPPORTED_BACKENDS) == 0:
self.skipTest('semi-structured sparsity has no available backend!')
super().setUp()
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
run_test
|
def run_test(batch_shape, m, n, k, device, dtype, dtype_out, add_bias, activation, rtol, atol):
weight = rand_sparse_semi_structured(m, k, dtype, device)
input = make_tensor((*batch_shape, n, k), dtype=dtype, device=device)
bias = make_tensor((m,), dtype=dtype_out, device=device) if add_bias else None
dtype_dense = torch.float32
input_dense = input.to(dtype_dense)
weight_dense = weight.to(dtype_dense)
bias_dense = bias.to(dtype_dense) if add_bias else None
output0 = torch.nn.functional.linear(input_dense, weight_dense, bias=bias_dense)
if activation == "relu":
relu = torch.nn.ReLU()
output0 = relu(output0)
elif activation == "silu":
silu = torch.nn.SiLU()
output0 = silu(output0)
compressed = to_sparse_semi_structured(weight)
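# The compressed tensor exposes the packed non-zero values and the 2:4 sparsity metadata separately; both are fed to the fused sparse linear op below.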
weight_sparse = compressed.values()
meta = compressed.indices()
output1 = torch._sparse_semi_structured_linear(input, weight_sparse, meta, bias=bias, activation=activation,
out_dtype=dtype_out if dtype == torch.int8 else None)
torch.testing.assert_close(output1.to(dtype_dense), output0, rtol=rtol, atol=atol)
if dtype == torch.float32:
# Inputs are converted to TF32 internally for sparse GEMM,
# so have the dense GEMM do the same so that results match.
orig = torch.backends.cuda.matmul.allow_tf32
torch.backends.cuda.matmul.allow_tf32 = True
batch_shapes = [[], [3], [3, 1]]
dtype_out = {torch.int8: torch.int32, torch.half: torch.half, torch.bfloat16: torch.bfloat16, torch.float32: torch.float32}
activations = [None, "relu", "silu"]
rtol, atol = 1e-3, 1e-3
if dtype == torch.bfloat16:
rtol, atol = 5e-3, 5e-3
elif dtype == torch.float32:
rtol, atol = 1e-3, 75e-2
for batch_shape, m, n, k, add_bias, activation in \
itertools.product(batch_shapes, range(3), range(3), range(3), (False, True), activations):
if activation == "silu" and dtype == torch.int8:
continue # SiLU not supported for integer inputs
m = 2 ** m * 32
n = 2 ** n * 32
k = 2 ** k * 128
run_test(batch_shape, m, n, k, device, dtype, dtype_out[dtype], add_bias, activation, rtol, atol)
if dtype == torch.float32:
torch.backends.cuda.matmul.allow_tf32 = orig
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
run_test
|
def run_test(batch_shape, m, n, k, device, dtype, dtype_out, add_bias, activation, rtol, atol):
weight = rand_sparse_semi_structured(m, k, dtype, device)
input = make_tensor((*batch_shape, n, k), dtype=dtype, device=device)
bias = make_tensor((m,), dtype=dtype_out, device=device) if add_bias else None
dtype_dense = torch.float32
input_dense = input.to(dtype_dense)
weight_dense = weight.to(dtype_dense)
bias_dense = bias.to(dtype_dense) if add_bias else None
output0 = torch.nn.functional.linear(input_dense, weight_dense, bias=bias_dense)
if activation == "relu":
relu = torch.nn.ReLU()
output0 = relu(output0)
elif activation == "silu":
silu = torch.nn.SiLU()
output0 = silu(output0)
compressed = to_sparse_semi_structured(weight)
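# The compressed tensor exposes the packed non-zero values and the 2:4 sparsity metadata separately; both are fed to the fused sparse linear op below.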
weight_sparse = compressed.values()
meta = compressed.indices()
output1 = torch._sparse_semi_structured_linear(input, weight_sparse, meta, bias=bias, activation=activation,
out_dtype=dtype_out if dtype == torch.int8 else None)
torch.testing.assert_close(output1.to(dtype_dense), output0, rtol=rtol, atol=atol)
if dtype == torch.float32:
# Inputs are converted to TF32 internally for sparse GEMM,
# so have the dense GEMM do the same so that results match.
orig = torch.backends.cuda.matmul.allow_tf32
torch.backends.cuda.matmul.allow_tf32 = True
batch_shapes = [[], [3], [3, 1]]
dtype_out = {torch.int8: torch.int32, torch.half: torch.half, torch.bfloat16: torch.bfloat16, torch.float32: torch.float32}
activations = [None, "relu", "silu"]
rtol, atol = 1e-3, 1e-3
if dtype == torch.bfloat16:
rtol, atol = 5e-3, 5e-3
elif dtype == torch.float32:
rtol, atol = 1e-3, 75e-2
for batch_shape, m, n, k, add_bias, activation in \
itertools.product(batch_shapes, range(3), range(3), range(3), (False, True), activations):
if activation == "silu" and dtype == torch.int8:
continue # SiLU not supported for integer inputs
m = 2 ** m * 32
n = 2 ** n * 32
k = 2 ** k * 128
run_test(batch_shape, m, n, k, device, dtype, dtype_out[dtype], add_bias, activation, rtol, atol)
if dtype == torch.float32:
torch.backends.cuda.matmul.allow_tf32 = orig
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
run_test
|
def run_test(batch_shape, m, n, k, device, dtype, dtype_out, add_bias, activation, rtol, atol):
weight = rand_sparse_semi_structured(m, k, dtype, device)
input = make_tensor((*batch_shape, n, k), dtype=dtype, device=device)
bias = make_tensor((m,), dtype=dtype_out, device=device) if add_bias else None
dtype_dense = torch.float32
input_dense = input.to(dtype_dense)
weight_dense = weight.to(dtype_dense)
bias_dense = bias.to(dtype_dense) if add_bias else None
output0 = torch.nn.functional.linear(input_dense, weight_dense, bias=bias_dense)
if activation == "relu":
relu = torch.nn.ReLU()
output0 = relu(output0)
elif activation == "silu":
silu = torch.nn.SiLU()
output0 = silu(output0)
compressed = to_sparse_semi_structured(weight)
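# The compressed tensor exposes the packed non-zero values and the 2:4 sparsity metadata separately; both are fed to the fused sparse linear op below.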
weight_sparse = compressed.values()
meta = compressed.indices()
output1 = torch._sparse_semi_structured_linear(input, weight_sparse, meta, bias=bias, activation=activation,
out_dtype=dtype_out if dtype == torch.int8 else None)
torch.testing.assert_close(output1.to(dtype_dense), output0, rtol=rtol, atol=atol)
if dtype == torch.float32:
# Inputs are converted to TF32 internally for sparse GEMM,
# so have the dense GEMM do the same so that results match.
orig = torch.backends.cuda.matmul.allow_tf32
torch.backends.cuda.matmul.allow_tf32 = True
batch_shapes = [[], [3], [3, 1]]
dtype_out = {torch.int8: torch.int32, torch.half: torch.half, torch.bfloat16: torch.bfloat16, torch.float32: torch.float32}
activations = [None, "relu", "silu"]
rtol, atol = 1e-3, 1e-3
if dtype == torch.bfloat16:
rtol, atol = 5e-3, 5e-3
elif dtype == torch.float32:
rtol, atol = 1e-3, 75e-2
for batch_shape, m, n, k, add_bias, activation in \
itertools.product(batch_shapes, range(3), range(3), range(3), (False, True), activations):
if activation == "silu" and dtype == torch.int8:
continue # SiLU not supported for integer inputs
m = 2 ** m * 32
n = 2 ** n * 32
k = 2 ** k * 128
run_test(batch_shape, m, n, k, device, dtype, dtype_out[dtype], add_bias, activation, rtol, atol)
if dtype == torch.float32:
torch.backends.cuda.matmul.allow_tf32 = orig
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
test_conversions_all_patterns
|
def test_conversions_all_patterns(self, device, dtype):
r, c = 32, 128
dense_inv, dense_val = rand_sparse_semi_structured_all_patterns(r, c, dtype, device)
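# dense_val is the dense tensor expected after compressing dense_inv and converting back to dense.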
compressed = to_sparse_semi_structured(dense_inv)
dense = compressed.to_dense()
torch.testing.assert_close(dense, dense_val, rtol=0, atol=0)
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
class TestSparseSemiStructuredCUTLASS(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
setUp
|
def setUp(self):
if len(SEMI_STRUCTURED_SUPPORTED_BACKENDS) == 0:
self.skipTest('semi-structured sparsity has no available backend!')
super().setUp()
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
test_cslt_sparse_mm_search
|
def test_cslt_sparse_mm_search(self, device, dtype):
A = rand_sparse_semi_structured_mask(128, 128, dtype=dtype)
A_compressed = torch._cslt_compress(A)
B = torch.ones((128, 128), device=device).to(dtype)
A_compressed = torch._cslt_compress(A)
alg_id = torch._cslt_sparse_mm_search(A_compressed, B.t())
# for cuSPARSELt v0.4.0 there is a bug where although there are 5 alg_ids, we run into an error
# when using the last one (4)
# in cuSPARSELt v0.5.0 there are only 4 alg_ids total, so we should remove the +1 here when we update.
# TODO Move this into the cuSPARSELt backend
assert alg_id in range(CUSPARSELT_NUM_ALG_IDS + 1)
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
CUSPARSELT_NUM_ALG_IDS = 4
CUSPARSELT_MIXED_DTYPE_SUPPORT = [torch.float16, torch.bfloat16, torch.int32]
class TestSparseSemiStructuredCUSPARSELT(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
test_cusparselt_backend
|
def test_cusparselt_backend(self):
version = _get_torch_cuda_version()
assert torch.backends.cusparselt.is_available()
# CUDA 11.8 has cuSPARSELt v0.4.0 support
if version == (11, 8):
assert torch.backends.cusparselt.version() == 400
# CUDA 12.1 has cuSPARSELt v0.5.2 support
elif version == (12, 1):
assert torch.backends.cusparselt.version() == 502
# CUDA 12.4+ has cuSPARSELt v0.6.2 support
elif version >= (12, 4):
assert torch.backends.cusparselt.version() == 602
else:
assert torch.backends.cusparselt.version() is None
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
CUSPARSELT_NUM_ALG_IDS = 4
CUSPARSELT_MIXED_DTYPE_SUPPORT = [torch.float16, torch.bfloat16, torch.int32]
class TestSparseSemiStructuredCUSPARSELT(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_spectral_ops.py
|
skip_helper_for_fft
|
def skip_helper_for_fft(device, dtype):
device_type = torch.device(device).type
if dtype not in (torch.half, torch.complex32):
return
if device_type == 'cpu':
raise unittest.SkipTest("half and complex32 are not supported on CPU")
if TEST_WITH_ROCM:
raise unittest.SkipTest("half and complex32 are not supported on ROCM")
if not SM53OrLater:
raise unittest.SkipTest("half and complex32 are only supported on CUDA device with SM>53")
# Tests of functions related to Fourier analysis in the torch.fft namespace
class TestFFT(TestCase):
exact_dtype = True
@onlyNativeDeviceTypes
@ops([op for op in spectral_funcs if op.ndimensional == SpectralFuncType.OneD],
allowed_dtypes=(torch.float, torch.cfloat))
def test_reference_1d(self, device, dtype, op):
if op.ref is None:
raise unittest.SkipTest("No reference implementation")
norm_modes = REFERENCE_NORM_MODES
test_args = [
*product(
# input
(torch.randn(67, device=device, dtype=dtype),
torch.randn(80, device=device, dtype=dtype),
torch.randn(12, 14, device=device, dtype=dtype),
torch.randn(9, 6, 3, device=device, dtype=dtype)),
# n
(None, 50, 6),
# dim
(-1, 0),
# norm
norm_modes
),
# Test transforming middle dimensions of multi-dim tensor
*product(
(torch.randn(4, 5, 6, 7, device=device, dtype=dtype),),
(None,),
(1, 2, -2,),
norm_modes
)
]
for iargs in test_args:
args = list(iargs)
input = args[0]
args = args[1:]
expected = op.ref(input.cpu().numpy(), *args)
exact_dtype = dtype in (torch.double, torch.complex128)
actual = op(input, *args)
self.assertEqual(actual, expected, exact_dtype=exact_dtype)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
torch.chalf : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double, torch.complex32, torch.complex64, torch.complex128)
def test_fft_round_trip(self, device, dtype):
skip_helper_for_fft(device, dtype)
# Test that round trip through ifft(fft(x)) is the identity
if dtype not in (torch.half, torch.complex32):
test_args = list(product(
# input
(torch.randn(67, device=device, dtype=dtype),
torch.randn(80, device=device, dtype=dtype),
torch.randn(12, 14, device=device, dtype=dtype),
torch.randn(9, 6, 3, device=device, dtype=dtype)),
# dim
(-1, 0),
# norm
(None, "forward", "backward", "ortho")
))
else:
# cuFFT supports powers of 2 for half and complex half precision
test_args = list(product(
# input
(torch.randn(64, device=device, dtype=dtype),
torch.randn(128, device=device, dtype=dtype),
torch.randn(4, 16, device=device, dtype=dtype),
torch.randn(8, 6, 2, device=device, dtype=dtype)),
# dim
(-1, 0),
# norm
(None, "forward", "backward", "ortho")
))
fft_functions = [(torch.fft.fft, torch.fft.ifft)]
# Real-only functions
if not dtype.is_complex:
# NOTE: Using ihfft as "forward" transform to avoid needing to
# generate true half-complex input
fft_functions += [(torch.fft.rfft, torch.fft.irfft),
(torch.fft.ihfft, torch.fft.hfft)]
for forward, backward in fft_functions:
for x, dim, norm in test_args:
kwargs = {
'n': x.size(dim),
'dim': dim,
'norm': norm,
}
y = backward(forward(x, **kwargs), **kwargs)
if x.dtype is torch.half and y.dtype is torch.complex32:
# Since type promotion currently doesn't work with complex32
# manually promote `x` to complex32
x = x.to(torch.complex32)
# For real input, ifft(fft(x)) will convert to complex
self.assertEqual(x, y, exact_dtype=(
forward != torch.fft.fft or x.is_complex()))
# Note: NumPy will throw a ValueError for an empty input
@onlyNativeDeviceTypes
@ops(spectral_funcs, allowed_dtypes=(torch.half, torch.float, torch.complex32, torch.cfloat))
def test_empty_fft(self, device, dtype, op):
t = torch.empty(1, 0, device=device, dtype=dtype)
match = r"Invalid number of data points \([-\d]*\) specified"
with self.assertRaisesRegex(RuntimeError, match):
op(t)
@onlyNativeDeviceTypes
def test_empty_ifft(self, device):
t = torch.empty(2, 1, device=device, dtype=torch.complex64)
match = r"Invalid number of data points \([-\d]*\) specified"
for f in [torch.fft.irfft, torch.fft.irfft2, torch.fft.irfftn,
torch.fft.hfft, torch.fft.hfft2, torch.fft.hfftn]:
with self.assertRaisesRegex(RuntimeError, match):
f(t)
@onlyNativeDeviceTypes
def test_fft_invalid_dtypes(self, device):
t = torch.randn(64, device=device, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, "rfft expects a real input tensor"):
torch.fft.rfft(t)
with self.assertRaisesRegex(RuntimeError, "rfftn expects a real-valued input tensor"):
torch.fft.rfftn(t)
with self.assertRaisesRegex(RuntimeError, "ihfft expects a real input tensor"):
torch.fft.ihfft(t)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.half, torch.float, torch.double,
torch.complex32, torch.complex64, torch.complex128)
def test_fft_type_promotion(self, device, dtype):
skip_helper_for_fft(device, dtype)
if dtype.is_complex or dtype.is_floating_point:
t = torch.randn(64, device=device, dtype=dtype)
else:
t = torch.randint(-2, 2, (64,), device=device, dtype=dtype)
PROMOTION_MAP = {
torch.int8: torch.complex64,
torch.half: torch.complex32,
torch.float: torch.complex64,
torch.double: torch.complex128,
torch.complex32: torch.complex32,
torch.complex64: torch.complex64,
torch.complex128: torch.complex128,
}
T = torch.fft.fft(t)
self.assertEqual(T.dtype, PROMOTION_MAP[dtype])
PROMOTION_MAP_C2R = {
torch.int8: torch.float,
torch.half: torch.half,
torch.float: torch.float,
torch.double: torch.double,
torch.complex32: torch.half,
torch.complex64: torch.float,
torch.complex128: torch.double,
}
if dtype in (torch.half, torch.complex32):
# cuFFT supports powers of 2 for half and complex half precision
# NOTE: With hfft and default args where output_size n=2*(input_size - 1),
# we make sure that logical fft size is a power of two.
x = torch.randn(65, device=device, dtype=dtype)
R = torch.fft.hfft(x)
else:
R = torch.fft.hfft(t)
self.assertEqual(R.dtype, PROMOTION_MAP_C2R[dtype])
if not dtype.is_complex:
PROMOTION_MAP_R2C = {
torch.int8: torch.complex64,
torch.half: torch.complex32,
torch.float: torch.complex64,
torch.double: torch.complex128,
}
C = torch.fft.rfft(t)
self.assertEqual(C.dtype, PROMOTION_MAP_R2C[dtype])
@onlyNativeDeviceTypes
@ops(spectral_funcs, dtypes=OpDTypes.unsupported,
allowed_dtypes=[torch.half, torch.bfloat16])
def test_fft_half_and_bfloat16_errors(self, device, dtype, op):
# TODO: Remove torch.half error when complex32 is fully implemented
sample = first_sample(self, op.sample_inputs(device, dtype))
device_type = torch.device(device).type
if dtype is torch.half and device_type == 'cuda' and TEST_WITH_ROCM:
err_msg = "Unsupported dtype "
elif dtype is torch.half and device_type == 'cuda' and not SM53OrLater:
err_msg = "cuFFT doesn't support signals of half type with compute capability less than SM_53"
else:
err_msg = "Unsupported dtype "
with self.assertRaisesRegex(RuntimeError, err_msg):
op(sample.input, *sample.args, **sample.kwargs)
@onlyNativeDeviceTypes
@ops(spectral_funcs, allowed_dtypes=(torch.half, torch.chalf))
def test_fft_half_and_chalf_not_power_of_two_error(self, device, dtype, op):
t = make_tensor(13, 13, device=device, dtype=dtype)
err_msg = "cuFFT only supports dimensions whose sizes are powers of two"
with self.assertRaisesRegex(RuntimeError, err_msg):
op(t)
if op.ndimensional in (SpectralFuncType.ND, SpectralFuncType.TwoD):
kwargs = {'s': (12, 12)}
else:
kwargs = {'n': 12}
with self.assertRaisesRegex(RuntimeError, err_msg):
op(t, **kwargs)
# nd-fft tests
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@ops([op for op in spectral_funcs if op.ndimensional == SpectralFuncType.ND],
allowed_dtypes=(torch.cfloat, torch.cdouble))
def test_reference_nd(self, device, dtype, op):
if op.ref is None:
raise unittest.SkipTest("No reference implementation")
norm_modes = REFERENCE_NORM_MODES
# input_ndim, s, dim
transform_desc = [
*product(range(2, 5), (None,), (None, (0,), (0, -1))),
*product(range(2, 5), (None, (4, 10)), (None,)),
(6, None, None),
(5, None, (1, 3, 4)),
(3, None, (1,)),
(1, None, (0,)),
(4, (10, 10), None),
(4, (10, 10), (0, 1))
]
for input_ndim, s, dim in transform_desc:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
input = torch.randn(*shape, device=device, dtype=dtype)
for norm in norm_modes:
expected = op.ref(input.cpu().numpy(), s, dim, norm)
exact_dtype = dtype in (torch.double, torch.complex128)
actual = op(input, s, dim, norm)
self.assertEqual(actual, expected, exact_dtype=exact_dtype)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
torch.chalf : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double,
torch.complex32, torch.complex64, torch.complex128)
def test_fftn_round_trip(self, device, dtype):
skip_helper_for_fft(device, dtype)
norm_modes = (None, "forward", "backward", "ortho")
# input_ndim, dim
transform_desc = [
*product(range(2, 5), (None, (0,), (0, -1))),
(7, None),
(5, (1, 3, 4)),
(3, (1,)),
(1, 0),
]
fft_functions = [(torch.fft.fftn, torch.fft.ifftn)]
# Real-only functions
if not dtype.is_complex:
# NOTE: Using ihfftn as "forward" transform to avoid needing to
# generate true half-complex input
fft_functions += [(torch.fft.rfftn, torch.fft.irfftn),
(torch.fft.ihfftn, torch.fft.hfftn)]
for input_ndim, dim in transform_desc:
if dtype in (torch.half, torch.complex32):
# cuFFT supports powers of 2 for half and complex half precision
shape = itertools.islice(itertools.cycle((2, 4, 8)), input_ndim)
else:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
x = torch.randn(*shape, device=device, dtype=dtype)
for (forward, backward), norm in product(fft_functions, norm_modes):
if isinstance(dim, tuple):
s = [x.size(d) for d in dim]
else:
s = x.size() if dim is None else x.size(dim)
kwargs = {'s': s, 'dim': dim, 'norm': norm}
y = backward(forward(x, **kwargs), **kwargs)
# For real input, ifftn(fftn(x)) will convert to complex
if x.dtype is torch.half and y.dtype is torch.chalf:
# Since type promotion currently doesn't work with complex32
# manually promote `x` to complex32
self.assertEqual(x.to(torch.chalf), y)
else:
self.assertEqual(x, y, exact_dtype=(
forward != torch.fft.fftn or x.is_complex()))
@onlyNativeDeviceTypes
@ops([op for op in spectral_funcs if op.ndimensional == SpectralFuncType.ND],
allowed_dtypes=[torch.float, torch.cfloat])
def test_fftn_invalid(self, device, dtype, op):
a = torch.rand(10, 10, 10, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
op(a, dim=(0, 1, 0))
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
op(a, dim=(2, -1))
with self.assertRaisesRegex(RuntimeError, "dim and shape .* same length"):
op(a, s=(1,), dim=(0, 1))
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
op(a, dim=(3,))
with self.assertRaisesRegex(RuntimeError, "tensor only has 3 dimensions"):
op(a, s=(10, 10, 10, 10))
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double)
def test_hfftn(self, device, dtype):
skip_helper_for_fft(device, dtype)
# input_ndim, dim
transform_desc = [
*product(range(2, 5), (None, (0,), (0, -1))),
(6, None),
(5, (1, 3, 4)),
(3, (1,)),
(1, (0,)),
(4, (0, 1))
]
for input_ndim, dim in transform_desc:
actual_dims = list(range(input_ndim)) if dim is None else dim
if dtype is torch.half:
shape = tuple(itertools.islice(itertools.cycle((2, 4, 8)), input_ndim))
else:
shape = tuple(itertools.islice(itertools.cycle(range(4, 9)), input_ndim))
expect = torch.randn(*shape, device=device, dtype=dtype)
input = torch.fft.ifftn(expect, dim=dim, norm="ortho")
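# Slice off the half-symmetric component along the last transformed dimension (hfftn expects the half-spectrum as input)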
lastdim = actual_dims[-1]
lastdim_size = input.size(lastdim) // 2 + 1
idx = [slice(None)] * input_ndim
idx[lastdim] = slice(0, lastdim_size)
input = input[idx]
s = [shape[dim] for dim in actual_dims]
actual = torch.fft.hfftn(input, s=s, dim=dim, norm="ortho")
self.assertEqual(expect, actual)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double)
def test_ihfftn(self, device, dtype):
skip_helper_for_fft(device, dtype)
# input_ndim, dim
transform_desc = [
*product(range(2, 5), (None, (0,), (0, -1))),
(6, None),
(5, (1, 3, 4)),
(3, (1,)),
(1, (0,)),
(4, (0, 1))
]
for input_ndim, dim in transform_desc:
if dtype is torch.half:
shape = tuple(itertools.islice(itertools.cycle((2, 4, 8)), input_ndim))
else:
shape = tuple(itertools.islice(itertools.cycle(range(4, 9)), input_ndim))
input = torch.randn(*shape, device=device, dtype=dtype)
expect = torch.fft.ifftn(input, dim=dim, norm="ortho")
# Slice off the half-symmetric component
lastdim = -1 if dim is None else dim[-1]
lastdim_size = expect.size(lastdim) // 2 + 1
idx = [slice(None)] * input_ndim
idx[lastdim] = slice(0, lastdim_size)
expect = expect[idx]
actual = torch.fft.ihfftn(input, dim=dim, norm="ortho")
self.assertEqual(expect, actual)
# 2d-fft tests
# NOTE: 2d transforms are only thin wrappers over n-dim transforms,
# so don't require exhaustive testing.
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double, torch.complex128)
def test_fft2_numpy(self, device, dtype):
norm_modes = REFERENCE_NORM_MODES
# input_ndim, s
transform_desc = [
*product(range(2, 5), (None, (4, 10))),
]
fft_functions = ['fft2', 'ifft2', 'irfft2', 'hfft2']
if dtype.is_floating_point:
fft_functions += ['rfft2', 'ihfft2']
for input_ndim, s in transform_desc:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
input = torch.randn(*shape, device=device, dtype=dtype)
for fname, norm in product(fft_functions, norm_modes):
torch_fn = getattr(torch.fft, fname)
if "hfft" in fname:
if not has_scipy_fft:
continue # Requires scipy to compare against
numpy_fn = getattr(scipy.fft, fname)
else:
numpy_fn = getattr(np.fft, fname)
def fn(t: torch.Tensor, s: Optional[List[int]], dim: List[int] = (-2, -1), norm: Optional[str] = None):
return torch_fn(t, s, dim, norm)
torch_fns = (torch_fn, torch.jit.script(fn))
# Once with dim defaulted
input_np = input.cpu().numpy()
expected = numpy_fn(input_np, s, norm=norm)
for fn in torch_fns:
actual = fn(input, s, norm=norm)
self.assertEqual(actual, expected)
# Once with explicit dims
dim = (1, 0)
expected = numpy_fn(input_np, s, dim, norm)
for fn in torch_fns:
actual = fn(input, s, dim, norm)
self.assertEqual(actual, expected)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.complex64)
def test_fft2_fftn_equivalence(self, device, dtype):
norm_modes = (None, "forward", "backward", "ortho")
# input_ndim, s, dim
transform_desc = [
*product(range(2, 5), (None, (4, 10)), (None, (1, 0))),
(3, None, (0, 2)),
]
fft_functions = ['fft', 'ifft', 'irfft', 'hfft']
# Real-only functions
if dtype.is_floating_point:
fft_functions += ['rfft', 'ihfft']
for input_ndim, s, dim in transform_desc:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
x = torch.randn(*shape, device=device, dtype=dtype)
for func, norm in product(fft_functions, norm_modes):
f2d = getattr(torch.fft, func + '2')
fnd = getattr(torch.fft, func + 'n')
kwargs = {'s': s, 'norm': norm}
if dim is not None:
kwargs['dim'] = dim
expect = fnd(x, **kwargs)
else:
expect = fnd(x, dim=(-2, -1), **kwargs)
actual = f2d(x, **kwargs)
self.assertEqual(actual, expect)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
def test_fft2_invalid(self, device):
a = torch.rand(10, 10, 10, device=device)
fft_funcs = (torch.fft.fft2, torch.fft.ifft2,
torch.fft.rfft2, torch.fft.irfft2)
for func in fft_funcs:
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
func(a, dim=(0, 0))
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
func(a, dim=(2, -1))
with self.assertRaisesRegex(RuntimeError, "dim and shape .* same length"):
func(a, s=(1,))
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
func(a, dim=(2, 3))
c = torch.complex(a, a)
with self.assertRaisesRegex(RuntimeError, "rfftn expects a real-valued input"):
torch.fft.rfft2(c)
# Helper functions
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@dtypes(torch.float, torch.double)
def test_fftfreq_numpy(self, device, dtype):
test_args = [
*product(
# n
range(1, 20),
# d
(None, 10.0),
)
]
functions = ['fftfreq', 'rfftfreq']
for fname in functions:
torch_fn = getattr(torch.fft, fname)
numpy_fn = getattr(np.fft, fname)
for n, d in test_args:
args = (n,) if d is None else (n, d)
expected = numpy_fn(*args)
actual = torch_fn(*args, device=device, dtype=dtype)
self.assertEqual(actual, expected, exact_dtype=False)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_fftfreq_out(self, device, dtype):
for func in (torch.fft.fftfreq, torch.fft.rfftfreq):
expect = func(n=100, d=.5, device=device, dtype=dtype)
actual = torch.empty((), device=device, dtype=dtype)
with self.assertWarnsRegex(UserWarning, "out tensor will be resized"):
func(n=100, d=.5, out=actual)
self.assertEqual(actual, expect)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_fftshift_numpy(self, device, dtype):
test_args = [
# shape, dim
*product(((11,), (12,)), (None, 0, -1)),
*product(((4, 5), (6, 6)), (None, 0, (-1,))),
*product(((1, 1, 4, 6, 7, 2),), (None, (3, 4))),
]
functions = ['fftshift', 'ifftshift']
for shape, dim in test_args:
input = torch.rand(*shape, device=device, dtype=dtype)
input_np = input.cpu().numpy()
for fname in functions:
torch_fn = getattr(torch.fft, fname)
numpy_fn = getattr(np.fft, fname)
expected = numpy_fn(input_np, axes=dim)
actual = torch_fn(input, dim=dim)
self.assertEqual(actual, expected)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@dtypes(torch.float, torch.double)
def test_fftshift_frequencies(self, device, dtype):
for n in range(10, 15):
sorted_fft_freqs = torch.arange(-(n // 2), n - (n // 2),
device=device, dtype=dtype)
x = torch.fft.fftfreq(n, d=1 / n, device=device, dtype=dtype)
# Test fftshift sorts the fftfreq output
shifted = torch.fft.fftshift(x)
self.assertEqual(shifted, shifted.sort().values)
self.assertEqual(sorted_fft_freqs, shifted)
# And ifftshift is the inverse
self.assertEqual(x, torch.fft.ifftshift(shifted))
# Legacy fft tests
def _test_fft_ifft_rfft_irfft(self, device, dtype):
complex_dtype = corresponding_complex_dtype(dtype)
def _test_complex(sizes, signal_ndim, prepro_fn=lambda x: x):
x = prepro_fn(torch.randn(*sizes, dtype=complex_dtype, device=device))
dim = tuple(range(-signal_ndim, 0))
for norm in ('ortho', None):
res = torch.fft.fftn(x, dim=dim, norm=norm)
rec = torch.fft.ifftn(res, dim=dim, norm=norm)
self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='fft and ifft')
res = torch.fft.ifftn(x, dim=dim, norm=norm)
rec = torch.fft.fftn(res, dim=dim, norm=norm)
self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='ifft and fft')
def _test_real(sizes, signal_ndim, prepro_fn=lambda x: x):
x = prepro_fn(torch.randn(*sizes, dtype=dtype, device=device))
signal_numel = 1
signal_sizes = x.size()[-signal_ndim:]
dim = tuple(range(-signal_ndim, 0))
for norm in (None, 'ortho'):
res = torch.fft.rfftn(x, dim=dim, norm=norm)
rec = torch.fft.irfftn(res, s=signal_sizes, dim=dim, norm=norm)
self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='rfft and irfft')
res = torch.fft.fftn(x, dim=dim, norm=norm)
rec = torch.fft.ifftn(res, dim=dim, norm=norm)
x_complex = torch.complex(x, torch.zeros_like(x))
self.assertEqual(x_complex, rec, atol=1e-8, rtol=0, msg='fft and ifft (from real)')
# contiguous case
_test_real((100,), 1)
_test_real((10, 1, 10, 100), 1)
_test_real((100, 100), 2)
_test_real((2, 2, 5, 80, 60), 2)
_test_real((50, 40, 70), 3)
_test_real((30, 1, 50, 25, 20), 3)
_test_complex((100,), 1)
_test_complex((100, 100), 1)
_test_complex((100, 100), 2)
_test_complex((1, 20, 80, 60), 2)
_test_complex((50, 40, 70), 3)
_test_complex((6, 5, 50, 25, 20), 3)
# non-contiguous case
_test_real((165,), 1, lambda x: x.narrow(0, 25, 100)) # input is not aligned to complex type
_test_real((100, 100, 3), 1, lambda x: x[:, :, 0])
_test_real((100, 100), 2, lambda x: x.t())
_test_real((20, 100, 10, 10), 2, lambda x: x.view(20, 100, 100)[:, :60])
_test_real((65, 80, 115), 3, lambda x: x[10:60, 13:53, 10:80])
_test_real((30, 20, 50, 25), 3, lambda x: x.transpose(1, 2).transpose(2, 3))
_test_complex((100,), 1, lambda x: x.expand(100, 100))
_test_complex((20, 90, 110), 2, lambda x: x[:, 5:85].narrow(2, 5, 100))
_test_complex((40, 60, 3, 80), 3, lambda x: x.transpose(2, 0).select(0, 2)[5:55, :, 10:])
_test_complex((30, 55, 50, 22), 3, lambda x: x[:, 3:53, 15:40, 1:21])
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_fft_ifft_rfft_irfft(self, device, dtype):
self._test_fft_ifft_rfft_irfft(device, dtype)
@deviceCountAtLeast(1)
@onlyCUDA
@dtypes(torch.double)
def test_cufft_plan_cache(self, devices, dtype):
@contextmanager
def plan_cache_max_size(device, n):
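# Temporarily cap the cuFFT plan cache size for the given device (current device if None), restoring the original limit on exit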
if device is None:
plan_cache = torch.backends.cuda.cufft_plan_cache
else:
plan_cache = torch.backends.cuda.cufft_plan_cache[device]
original = plan_cache.max_size
plan_cache.max_size = n
yield
plan_cache.max_size = original
with plan_cache_max_size(devices[0], max(1, torch.backends.cuda.cufft_plan_cache.size - 10)):
self._test_fft_ifft_rfft_irfft(devices[0], dtype)
with plan_cache_max_size(devices[0], 0):
self._test_fft_ifft_rfft_irfft(devices[0], dtype)
torch.backends.cuda.cufft_plan_cache.clear()
# check that it still works after clearing the cache
with plan_cache_max_size(devices[0], 10):
self._test_fft_ifft_rfft_irfft(devices[0], dtype)
with self.assertRaisesRegex(RuntimeError, r"must be non-negative"):
torch.backends.cuda.cufft_plan_cache.max_size = -1
with self.assertRaisesRegex(RuntimeError, r"read-only property"):
torch.backends.cuda.cufft_plan_cache.size = -1
with self.assertRaisesRegex(RuntimeError, r"but got device with index"):
torch.backends.cuda.cufft_plan_cache[torch.cuda.device_count() + 10]
# Multigpu tests
if len(devices) > 1:
# Test that different GPU has different cache
x0 = torch.randn(2, 3, 3, device=devices[0])
x1 = x0.to(devices[1])
self.assertEqual(torch.fft.rfftn(x0, dim=(-2, -1)), torch.fft.rfftn(x1, dim=(-2, -1)))
# If a plan is used across different devices, the following line (or
# the assert above) would trigger illegal memory access. Other ways
# to trigger the error include
# (1) setting CUDA_LAUNCH_BLOCKING=1 (pytorch/pytorch#19224) and
# (2) printing a device 1 tensor.
x0.copy_(x1)
# Test that un-indexed `torch.backends.cuda.cufft_plan_cache` uses current device
with plan_cache_max_size(devices[0], 10):
with plan_cache_max_size(devices[1], 11):
self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
self.assertEqual(torch.backends.cuda.cufft_plan_cache[1].max_size, 11)
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10) # default is cuda:0
with torch.cuda.device(devices[1]):
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11) # default is cuda:1
with torch.cuda.device(devices[0]):
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10) # default is cuda:0
self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
with torch.cuda.device(devices[1]):
with plan_cache_max_size(None, 11): # default is cuda:1
self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
self.assertEqual(torch.backends.cuda.cufft_plan_cache[1].max_size, 11)
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11) # default is cuda:1
with torch.cuda.device(devices[0]):
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10) # default is cuda:0
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11) # default is cuda:1
# passes on ROCm w/ python 2.7, fails w/ python 3.6
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_stft(self, device, dtype):
if not TEST_LIBROSA:
raise unittest.SkipTest('librosa not found')
def librosa_stft(x, n_fft, hop_length, win_length, window, center):
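# Reference STFT computed with librosa; real and imaginary parts are stacked along the last dim to match return_complex=False output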
if window is None:
window = np.ones(n_fft if win_length is None else win_length)
else:
window = window.cpu().numpy()
input_1d = x.dim() == 1
if input_1d:
x = x.view(1, -1)
# NOTE: librosa 0.9 changed default pad_mode to 'constant' (zero padding)
# however, we use the pre-0.9 default ('reflect')
pad_mode = 'reflect'
result = []
for xi in x:
ri = librosa.stft(xi.cpu().numpy(), n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center,
pad_mode=pad_mode)
result.append(torch.from_numpy(np.stack([ri.real, ri.imag], -1)))
result = torch.stack(result, 0)
if input_1d:
result = result[0]
return result
def _test(sizes, n_fft, hop_length=None, win_length=None, win_sizes=None,
center=True, expected_error=None):
x = torch.randn(*sizes, dtype=dtype, device=device)
if win_sizes is not None:
window = torch.randn(*win_sizes, dtype=dtype, device=device)
else:
window = None
if expected_error is None:
result = x.stft(n_fft, hop_length, win_length, window,
center=center, return_complex=False)
# NB: librosa defaults to np.complex64 output, no matter what
# the input dtype
ref_result = librosa_stft(x, n_fft, hop_length, win_length, window, center)
self.assertEqual(result, ref_result, atol=7e-6, rtol=0, msg='stft comparison against librosa', exact_dtype=False)
# With return_complex=True, the result is the same but viewed as complex instead of real
result_complex = x.stft(n_fft, hop_length, win_length, window, center=center, return_complex=True)
self.assertEqual(result_complex, torch.view_as_complex(result))
else:
self.assertRaises(expected_error,
lambda: x.stft(n_fft, hop_length, win_length, window, center=center))
for center in [True, False]:
_test((10,), 7, center=center)
_test((10, 4000), 1024, center=center)
_test((10,), 7, 2, center=center)
_test((10, 4000), 1024, 512, center=center)
_test((10,), 7, 2, win_sizes=(7,), center=center)
_test((10, 4000), 1024, 512, win_sizes=(1024,), center=center)
# spectral oversample
_test((10,), 7, 2, win_length=5, center=center)
_test((10, 4000), 1024, 512, win_length=100, center=center)
_test((10, 4, 2), 1, 1, expected_error=RuntimeError)
_test((10,), 11, 1, center=False, expected_error=RuntimeError)
_test((10,), -1, 1, expected_error=RuntimeError)
_test((10,), 3, win_length=5, expected_error=RuntimeError)
_test((10,), 5, 4, win_sizes=(11,), expected_error=RuntimeError)
_test((10,), 5, 4, win_sizes=(1, 1), expected_error=RuntimeError)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_istft_against_librosa(self, device, dtype):
if not TEST_LIBROSA:
raise unittest.SkipTest('librosa not found')
def librosa_istft(x, n_fft, hop_length, win_length, window, length, center):
if window is None:
window = np.ones(n_fft if win_length is None else win_length)
else:
window = window.cpu().numpy()
return librosa.istft(x.cpu().numpy(), n_fft=n_fft, hop_length=hop_length,
win_length=win_length, length=length, window=window, center=center)
def _test(size, n_fft, hop_length=None, win_length=None, win_sizes=None,
length=None, center=True):
x = torch.randn(size, dtype=dtype, device=device)
if win_sizes is not None:
window = torch.randn(*win_sizes, dtype=dtype, device=device)
else:
window = None
x_stft = x.stft(n_fft, hop_length, win_length, window, center=center,
onesided=True, return_complex=True)
ref_result = librosa_istft(x_stft, n_fft, hop_length, win_length,
window, length, center)
result = x_stft.istft(n_fft, hop_length, win_length, window,
length=length, center=center)
self.assertEqual(result, ref_result)
for center in [True, False]:
_test(10, 7, center=center)
_test(4000, 1024, center=center)
_test(4000, 1024, center=center, length=4000)
_test(10, 7, 2, center=center)
_test(4000, 1024, 512, center=center)
_test(4000, 1024, 512, center=center, length=4000)
_test(10, 7, 2, win_sizes=(7,), center=center)
_test(4000, 1024, 512, win_sizes=(1024,), center=center)
_test(4000, 1024, 512, win_sizes=(1024,), center=center, length=4000)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double, torch.cdouble)
def test_complex_stft_roundtrip(self, device, dtype):
test_args = list(product(
# input
(torch.randn(600, device=device, dtype=dtype),
torch.randn(807, device=device, dtype=dtype),
torch.randn(12, 60, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(None, 10),
# center
(True,),
# pad_mode
("constant", "reflect", "circular"),
# normalized
(True, False),
# onesided
(True, False) if not dtype.is_complex else (False,),
))
for args in test_args:
x, n_fft, hop_length, center, pad_mode, normalized, onesided = args
common_kwargs = {
'n_fft': n_fft, 'hop_length': hop_length, 'center': center,
'normalized': normalized, 'onesided': onesided,
}
# Functional interface
x_stft = torch.stft(x, pad_mode=pad_mode, return_complex=True, **common_kwargs)
x_roundtrip = torch.istft(x_stft, return_complex=dtype.is_complex,
length=x.size(-1), **common_kwargs)
self.assertEqual(x_roundtrip, x)
# Tensor method interface
x_stft = x.stft(pad_mode=pad_mode, return_complex=True, **common_kwargs)
x_roundtrip = torch.istft(x_stft, return_complex=dtype.is_complex,
length=x.size(-1), **common_kwargs)
self.assertEqual(x_roundtrip, x)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double, torch.cdouble)
def test_stft_roundtrip_complex_window(self, device, dtype):
test_args = list(product(
# input
(torch.randn(600, device=device, dtype=dtype),
torch.randn(807, device=device, dtype=dtype),
torch.randn(12, 60, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(None, 10),
# pad_mode
("constant", "reflect", "replicate", "circular"),
# normalized
(True, False),
))
for args in test_args:
x, n_fft, hop_length, pad_mode, normalized = args
window = torch.rand(n_fft, device=device, dtype=torch.cdouble)
x_stft = torch.stft(
x, n_fft=n_fft, hop_length=hop_length, window=window,
center=True, pad_mode=pad_mode, normalized=normalized)
self.assertEqual(x_stft.dtype, torch.cdouble)
self.assertEqual(x_stft.size(-2), n_fft) # Not onesided
x_roundtrip = torch.istft(
x_stft, n_fft=n_fft, hop_length=hop_length, window=window,
center=True, normalized=normalized, length=x.size(-1),
return_complex=True)
self.assertEqual(x_stft.dtype, torch.cdouble)
if not dtype.is_complex:
self.assertEqual(x_roundtrip.imag, torch.zeros_like(x_roundtrip.imag),
atol=1e-6, rtol=0)
self.assertEqual(x_roundtrip.real, x)
else:
self.assertEqual(x_roundtrip, x)
@skipCPUIfNoFFT
@dtypes(torch.cdouble)
def test_complex_stft_definition(self, device, dtype):
test_args = list(product(
# input
(torch.randn(600, device=device, dtype=dtype),
torch.randn(807, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(10, 15)
))
for args in test_args:
window = torch.randn(args[1], device=device, dtype=dtype)
expected = _stft_reference(args[0], args[2], window)
actual = torch.stft(*args, window=window, center=False)
self.assertEqual(actual, expected)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.cdouble)
def test_complex_stft_real_equiv(self, device, dtype):
test_args = list(product(
# input
(torch.rand(600, device=device, dtype=dtype),
torch.rand(807, device=device, dtype=dtype),
torch.rand(14, 50, device=device, dtype=dtype),
torch.rand(6, 51, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(None, 10),
# win_length
(None, 20),
# center
(False, True),
# pad_mode
("constant", "reflect", "circular"),
# normalized
(True, False),
))
for args in test_args:
x, n_fft, hop_length, win_length, center, pad_mode, normalized = args
expected = _complex_stft(x, n_fft, hop_length=hop_length,
win_length=win_length, pad_mode=pad_mode,
center=center, normalized=normalized)
actual = torch.stft(x, n_fft, hop_length=hop_length,
win_length=win_length, pad_mode=pad_mode,
center=center, normalized=normalized)
self.assertEqual(expected, actual)
@skipCPUIfNoFFT
@dtypes(torch.cdouble)
def test_complex_istft_real_equiv(self, device, dtype):
test_args = list(product(
# input
(torch.rand(40, 20, device=device, dtype=dtype),
torch.rand(25, 1, device=device, dtype=dtype),
torch.rand(4, 20, 10, device=device, dtype=dtype)),
# hop_length
(None, 10),
# center
(False, True),
# normalized
(True, False),
))
for args in test_args:
x, hop_length, center, normalized = args
n_fft = x.size(-2)
expected = _complex_istft(x, n_fft, hop_length=hop_length,
center=center, normalized=normalized)
actual = torch.istft(x, n_fft, hop_length=hop_length,
center=center, normalized=normalized,
return_complex=True)
self.assertEqual(expected, actual)
@skipCPUIfNoFFT
def test_complex_stft_onesided(self, device):
# stft of complex input cannot be onesided
for x_dtype, window_dtype in product((torch.double, torch.cdouble), repeat=2):
x = torch.rand(100, device=device, dtype=x_dtype)
window = torch.rand(10, device=device, dtype=window_dtype)
if x_dtype.is_complex or window_dtype.is_complex:
with self.assertRaisesRegex(RuntimeError, 'complex'):
x.stft(10, window=window, pad_mode='constant', onesided=True)
else:
y = x.stft(10, window=window, pad_mode='constant', onesided=True,
return_complex=True)
self.assertEqual(y.dtype, torch.cdouble)
self.assertEqual(y.size(), (6, 51))
x = torch.rand(100, device=device, dtype=torch.cdouble)
with self.assertRaisesRegex(RuntimeError, 'complex'):
x.stft(10, pad_mode='constant', onesided=True)
# stft currently warns that it requires return_complex while an upgrader is written
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_stft_requires_complex(self, device):
x = torch.rand(100)
with self.assertRaisesRegex(RuntimeError, 'stft requires the return_complex parameter'):
y = x.stft(10, pad_mode='constant')
@skipCPUIfNoFFT
def test_fft_input_modification(self, device):
# FFT functions should not modify their input (gh-34551)
signal = torch.ones((2, 2, 2), device=device)
signal_copy = signal.clone()
spectrum = torch.fft.fftn(signal, dim=(-2, -1))
self.assertEqual(signal, signal_copy)
spectrum_copy = spectrum.clone()
_ = torch.fft.ifftn(spectrum, dim=(-2, -1))
self.assertEqual(spectrum, spectrum_copy)
half_spectrum = torch.fft.rfftn(signal, dim=(-2, -1))
self.assertEqual(signal, signal_copy)
half_spectrum_copy = half_spectrum.clone()
_ = torch.fft.irfftn(half_spectrum_copy, s=(2, 2), dim=(-2, -1))
self.assertEqual(half_spectrum, half_spectrum_copy)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_fft_plan_repeatable(self, device):
# Regression test for gh-58724 and gh-63152
for n in [2048, 3199, 5999]:
a = torch.randn(n, device=device, dtype=torch.complex64)
res1 = torch.fft.fftn(a)
res2 = torch.fft.fftn(a.clone())
self.assertEqual(res1, res2)
a = torch.randn(n, device=device, dtype=torch.float64)
res1 = torch.fft.rfft(a)
res2 = torch.fft.rfft(a.clone())
self.assertEqual(res1, res2)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_round_trip_simple_cases(self, device, dtype):
"""stft -> istft should recover the original signale"""
def _test(input, n_fft, length):
stft = torch.stft(input, n_fft=n_fft, return_complex=True)
inverse = torch.istft(stft, n_fft=n_fft, length=length)
self.assertEqual(input, inverse, exact_dtype=True)
_test(torch.ones(4, dtype=dtype, device=device), 4, 4)
_test(torch.zeros(4, dtype=dtype, device=device), 4, 4)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_round_trip_various_params(self, device, dtype):
"""stft -> istft should recover the original signale"""
def _test_istft_is_inverse_of_stft(stft_kwargs):
# generates a random sound signal for each trial and then does the stft/istft
# operation to check whether we can reconstruct the signal
data_sizes = [(2, 20), (3, 15), (4, 10)]
num_trials = 100
istft_kwargs = stft_kwargs.copy()
del istft_kwargs['pad_mode']
for sizes in data_sizes:
for i in range(num_trials):
original = torch.randn(*sizes, dtype=dtype, device=device)
stft = torch.stft(original, return_complex=True, **stft_kwargs)
inversed = torch.istft(stft, length=original.size(1), **istft_kwargs)
self.assertEqual(
inversed, original, msg='istft comparison against original',
atol=7e-6, rtol=0, exact_dtype=True)
patterns = [
# hann_window, centered, normalized, onesided
{
'n_fft': 12,
'hop_length': 4,
'win_length': 12,
'window': torch.hann_window(12, dtype=dtype, device=device),
'center': True,
'pad_mode': 'reflect',
'normalized': True,
'onesided': True,
},
# hann_window, centered, not normalized, not onesided
{
'n_fft': 12,
'hop_length': 2,
'win_length': 8,
'window': torch.hann_window(8, dtype=dtype, device=device),
'center': True,
'pad_mode': 'reflect',
'normalized': False,
'onesided': False,
},
# hamming_window, centered, normalized, not onesided
{
'n_fft': 15,
'hop_length': 3,
'win_length': 11,
'window': torch.hamming_window(11, dtype=dtype, device=device),
'center': True,
'pad_mode': 'constant',
'normalized': True,
'onesided': False,
},
# hamming_window, centered, not normalized, onesided
# window same size as n_fft
{
'n_fft': 5,
'hop_length': 2,
'win_length': 5,
'window': torch.hamming_window(5, dtype=dtype, device=device),
'center': True,
'pad_mode': 'constant',
'normalized': False,
'onesided': True,
},
]
for i, pattern in enumerate(patterns):
_test_istft_is_inverse_of_stft(pattern)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_round_trip_with_padding(self, device, dtype):
"""long hop_length or not centered may cause length mismatch in the inversed signal"""
def _test_istft_is_inverse_of_stft_with_padding(stft_kwargs):
# generates a random sound signal for each trial and then does the stft/istft
# operation to check whether we can reconstruct the signal
num_trials = 100
sizes = stft_kwargs['size']
del stft_kwargs['size']
istft_kwargs = stft_kwargs.copy()
del istft_kwargs['pad_mode']
for i in range(num_trials):
original = torch.randn(*sizes, dtype=dtype, device=device)
stft = torch.stft(original, return_complex=True, **stft_kwargs)
with self.assertWarnsOnceRegex(UserWarning, "The length of signal is shorter than the length parameter."):
inversed = torch.istft(stft, length=original.size(-1), **istft_kwargs)
n_frames = stft.size(-1)
if stft_kwargs["center"] is True:
len_expected = stft_kwargs["n_fft"] // 2 + stft_kwargs["hop_length"] * (n_frames - 1)
else:
len_expected = stft_kwargs["n_fft"] + stft_kwargs["hop_length"] * (n_frames - 1)
# trim the original for the case where the reconstructed signal is shorter than the original
padding = inversed[..., len_expected:]
inversed = inversed[..., :len_expected]
original = original[..., :len_expected]
# test that the padding points of the reconstructed signal are all zeros
zeros = torch.zeros_like(padding, device=padding.device)
self.assertEqual(
padding, zeros, msg='istft padding values against zeros',
atol=7e-6, rtol=0, exact_dtype=True)
self.assertEqual(
inversed, original, msg='istft comparison against original',
atol=7e-6, rtol=0, exact_dtype=True)
patterns = [
# hamming_window, not centered, not normalized, not onesided
# window same size as n_fft
{
'size': [2, 20],
'n_fft': 3,
'hop_length': 2,
'win_length': 3,
'window': torch.hamming_window(3, dtype=dtype, device=device),
'center': False,
'pad_mode': 'reflect',
'normalized': False,
'onesided': False,
},
# hamming_window, centered, not normalized, onesided, long hop_length
# window same size as n_fft
{
'size': [2, 500],
'n_fft': 256,
'hop_length': 254,
'win_length': 256,
'window': torch.hamming_window(256, dtype=dtype, device=device),
'center': True,
'pad_mode': 'constant',
'normalized': False,
'onesided': True,
},
]
for i, pattern in enumerate(patterns):
_test_istft_is_inverse_of_stft_with_padding(pattern)
@onlyNativeDeviceTypes
def test_istft_throws(self, device):
"""istft should throw exception for invalid parameters"""
stft = torch.zeros((3, 5, 2), device=device)
# the window is size 1 but it hops 20, so there is a gap, which throws an error
self.assertRaises(
RuntimeError, torch.istft, stft, n_fft=4,
hop_length=20, win_length=1, window=torch.ones(1))
# A window of zeros does not meet NOLA
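# (NOLA = nonzero overlap-add: the sum of squared, hop-shifted window values must be
# nonzero at every output sample, otherwise the least-squares inverse is not defined)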
invalid_window = torch.zeros(4, device=device)
self.assertRaises(
RuntimeError, torch.istft, stft, n_fft=4, win_length=4, window=invalid_window)
# Input cannot be empty
self.assertRaises(RuntimeError, torch.istft, torch.zeros((3, 0, 2)), 2)
self.assertRaises(RuntimeError, torch.istft, torch.zeros((0, 3, 2)), 2)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_of_sine(self, device, dtype):
complex_dtype = corresponding_complex_dtype(dtype)
def _test(amplitude, L, n):
# stft of amplitude*sin(2*pi/L*n*x) with the hop length and window size equaling L
x = torch.arange(2 * L + 1, device=device, dtype=dtype)
original = amplitude * torch.sin(2 * math.pi / L * x * n)
# stft = torch.stft(original, L, hop_length=L, win_length=L,
# window=torch.ones(L), center=False, normalized=False)
stft = torch.zeros((L // 2 + 1, 2), device=device, dtype=complex_dtype)
stft_largest_val = (amplitude * L) / 2.0
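# Analytically, each frame is one full period of amplitude*sin(2*pi*n*k/L), whose DFT is
# purely imaginary: -amplitude*L/2 at bin n and +amplitude*L/2 at the mirrored bin L - n.
# That is what gets filled into the onesided spectrum below (every frame is identical
# because the hop length equals the period).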
if n < stft.size(0):
stft[n].imag = torch.tensor(-stft_largest_val, dtype=dtype)
if 0 <= L - n < stft.size(0):
# symmetric about L // 2
stft[L - n].imag = torch.tensor(stft_largest_val, dtype=dtype)
inverse = torch.istft(
stft, L, hop_length=L, win_length=L,
window=torch.ones(L, device=device, dtype=dtype), center=False, normalized=False)
# There is a larger error due to the scaling of amplitude
original = original[..., :inverse.size(-1)]
self.assertEqual(inverse, original, atol=1e-3, rtol=0)
_test(amplitude=123, L=5, n=1)
_test(amplitude=150, L=5, n=2)
_test(amplitude=111, L=5, n=3)
_test(amplitude=160, L=7, n=4)
_test(amplitude=145, L=8, n=5)
_test(amplitude=80, L=9, n=6)
_test(amplitude=99, L=10, n=7)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_linearity(self, device, dtype):
num_trials = 100
complex_dtype = corresponding_complex_dtype(dtype)
def _test(data_size, kwargs):
for i in range(num_trials):
tensor1 = torch.randn(data_size, device=device, dtype=complex_dtype)
tensor2 = torch.randn(data_size, device=device, dtype=complex_dtype)
a, b = torch.rand(2, dtype=dtype, device=device)
# Also compare method vs. functional call signature
istft1 = tensor1.istft(**kwargs)
istft2 = tensor2.istft(**kwargs)
istft = a * istft1 + b * istft2
estimate = torch.istft(a * tensor1 + b * tensor2, **kwargs)
self.assertEqual(istft, estimate, atol=1e-5, rtol=0)
patterns = [
# hann_window, centered, normalized, onesided
(
(2, 7, 7),
{
'n_fft': 12,
'window': torch.hann_window(12, device=device, dtype=dtype),
'center': True,
'normalized': True,
'onesided': True,
},
),
# hann_window, centered, not normalized, not onesided
(
(2, 12, 7),
{
'n_fft': 12,
'window': torch.hann_window(12, device=device, dtype=dtype),
'center': True,
'normalized': False,
'onesided': False,
},
),
# hamming_window, centered, normalized, not onesided
(
(2, 12, 7),
{
'n_fft': 12,
'window': torch.hamming_window(12, device=device, dtype=dtype),
'center': True,
'normalized': True,
'onesided': False,
},
),
# hamming_window, not centered, not normalized, onesided
(
(2, 7, 3),
{
'n_fft': 12,
'window': torch.hamming_window(12, device=device, dtype=dtype),
'center': False,
'normalized': False,
'onesided': True,
},
)
]
for data_size, kwargs in patterns:
_test(data_size, kwargs)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_batch_istft(self, device):
original = torch.tensor([
[4., 4., 4., 4., 4.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]
], device=device, dtype=torch.complex64)
single = original.repeat(1, 1, 1)
multi = original.repeat(4, 1, 1)
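# repeat() adds a leading batch dimension: a batch of one and a batch of four copies
# of the same spectrogram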
i_original = torch.istft(original, n_fft=4, length=4)
i_single = torch.istft(single, n_fft=4, length=4)
i_multi = torch.istft(multi, n_fft=4, length=4)
self.assertEqual(i_original.repeat(1, 1), i_single, atol=1e-6, rtol=0, exact_dtype=True)
self.assertEqual(i_original.repeat(4, 1), i_multi, atol=1e-6, rtol=0, exact_dtype=True)
@onlyCUDA
@skipIf(not TEST_MKL, "Test requires MKL")
def test_stft_window_device(self, device):
# Test the (i)stft window must be on the same device as the input
x = torch.randn(1000, dtype=torch.complex64)
window = torch.randn(100, dtype=torch.complex64)
with self.assertRaisesRegex(RuntimeError, "stft input and window must be on the same device"):
torch.stft(x, n_fft=100, window=window.to(device))
with self.assertRaisesRegex(RuntimeError, "stft input and window must be on the same device"):
torch.stft(x.to(device), n_fft=100, window=window)
X = torch.stft(x, n_fft=100, window=window)
with self.assertRaisesRegex(RuntimeError, "istft input and window must be on the same device"):
torch.istft(X, n_fft=100, window=window.to(device))
with self.assertRaisesRegex(RuntimeError, "istft input and window must be on the same device"):
torch.istft(x.to(device), n_fft=100, window=window)
class FFTDocTestFinder:
'''The default doctest finder doesn't like that function.__module__ doesn't
match torch.fft. It assumes the functions are leaked imports.
'''
def __init__(self):
self.parser = doctest.DocTestParser()
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
doctests = []
modname = name if name is not None else obj.__name__
globs = {} if globs is None else globs
for fname in obj.__all__:
func = getattr(obj, fname)
if inspect.isroutine(func):
qualname = modname + '.' + fname
docstring = inspect.getdoc(func)
if docstring is None:
continue
examples = self.parser.get_doctest(
docstring, globs=globs, name=fname, filename=None, lineno=None)
doctests.append(examples)
return doctests
class TestFFTDocExamples(TestCase):
pass
|
def skip_helper_for_fft(device, dtype):
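# Half and complex32 FFTs are only implemented through cuFFT, which requires an SM53+ GPU
# for half precision, so skip those dtypes on CPU and on older CUDA devices.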
device_type = torch.device(device).type
if dtype not in (torch.half, torch.complex32):
return
if device_type == 'cpu':
raise unittest.SkipTest("half and complex32 are not supported on CPU")
if not SM53OrLater:
raise unittest.SkipTest("half and complex32 are only supported on CUDA device with SM>53")
# Tests of functions related to Fourier analysis in the torch.fft namespace
class TestFFT(TestCase):
exact_dtype = True
@onlyNativeDeviceTypes
@ops([op for op in spectral_funcs if op.ndimensional == SpectralFuncType.OneD],
allowed_dtypes=(torch.float, torch.cfloat))
def test_reference_1d(self, device, dtype, op):
if op.ref is None:
raise unittest.SkipTest("No reference implementation")
norm_modes = REFERENCE_NORM_MODES
test_args = [
*product(
# input
(torch.randn(67, device=device, dtype=dtype),
torch.randn(80, device=device, dtype=dtype),
torch.randn(12, 14, device=device, dtype=dtype),
torch.randn(9, 6, 3, device=device, dtype=dtype)),
# n
(None, 50, 6),
# dim
(-1, 0),
# norm
norm_modes
),
# Test transforming middle dimensions of multi-dim tensor
*product(
(torch.randn(4, 5, 6, 7, device=device, dtype=dtype),),
(None,),
(1, 2, -2,),
norm_modes
)
]
for iargs in test_args:
args = list(iargs)
input = args[0]
args = args[1:]
expected = op.ref(input.cpu().numpy(), *args)
exact_dtype = dtype in (torch.double, torch.complex128)
actual = op(input, *args)
self.assertEqual(actual, expected, exact_dtype=exact_dtype)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
torch.chalf : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double, torch.complex32, torch.complex64, torch.complex128)
def test_fft_round_trip(self, device, dtype):
skip_helper_for_fft(device, dtype)
# Test that round trip through ifft(fft(x)) is the identity
if dtype not in (torch.half, torch.complex32):
test_args = list(product(
# input
(torch.randn(67, device=device, dtype=dtype),
torch.randn(80, device=device, dtype=dtype),
torch.randn(12, 14, device=device, dtype=dtype),
torch.randn(9, 6, 3, device=device, dtype=dtype)),
# dim
(-1, 0),
# norm
(None, "forward", "backward", "ortho")
))
else:
# cuFFT supports powers of 2 for half and complex half precision
test_args = list(product(
# input
(torch.randn(64, device=device, dtype=dtype),
torch.randn(128, device=device, dtype=dtype),
torch.randn(4, 16, device=device, dtype=dtype),
torch.randn(8, 6, 2, device=device, dtype=dtype)),
# dim
(-1, 0),
# norm
(None, "forward", "backward", "ortho")
))
fft_functions = [(torch.fft.fft, torch.fft.ifft)]
# Real-only functions
if not dtype.is_complex:
# NOTE: Using ihfft as "forward" transform to avoid needing to
# generate true half-complex input
fft_functions += [(torch.fft.rfft, torch.fft.irfft),
(torch.fft.ihfft, torch.fft.hfft)]
for forward, backward in fft_functions:
for x, dim, norm in test_args:
kwargs = {
'n': x.size(dim),
'dim': dim,
'norm': norm,
}
y = backward(forward(x, **kwargs), **kwargs)
if x.dtype is torch.half and y.dtype is torch.complex32:
# Since type promotion currently doesn't work with complex32
# manually promote `x` to complex32
x = x.to(torch.complex32)
# For real input, ifft(fft(x)) will convert to complex
self.assertEqual(x, y, exact_dtype=(
forward != torch.fft.fft or x.is_complex()))
# Note: NumPy will throw a ValueError for an empty input
@onlyNativeDeviceTypes
@ops(spectral_funcs, allowed_dtypes=(torch.half, torch.float, torch.complex32, torch.cfloat))
def test_empty_fft(self, device, dtype, op):
t = torch.empty(1, 0, device=device, dtype=dtype)
match = r"Invalid number of data points \([-\d]*\) specified"
with self.assertRaisesRegex(RuntimeError, match):
op(t)
@onlyNativeDeviceTypes
def test_empty_ifft(self, device):
t = torch.empty(2, 1, device=device, dtype=torch.complex64)
match = r"Invalid number of data points \([-\d]*\) specified"
for f in [torch.fft.irfft, torch.fft.irfft2, torch.fft.irfftn,
torch.fft.hfft, torch.fft.hfft2, torch.fft.hfftn]:
with self.assertRaisesRegex(RuntimeError, match):
f(t)
@onlyNativeDeviceTypes
def test_fft_invalid_dtypes(self, device):
t = torch.randn(64, device=device, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, "rfft expects a real input tensor"):
torch.fft.rfft(t)
with self.assertRaisesRegex(RuntimeError, "rfftn expects a real-valued input tensor"):
torch.fft.rfftn(t)
with self.assertRaisesRegex(RuntimeError, "ihfft expects a real input tensor"):
torch.fft.ihfft(t)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.half, torch.float, torch.double,
torch.complex32, torch.complex64, torch.complex128)
def test_fft_type_promotion(self, device, dtype):
skip_helper_for_fft(device, dtype)
if dtype.is_complex or dtype.is_floating_point:
t = torch.randn(64, device=device, dtype=dtype)
else:
t = torch.randint(-2, 2, (64,), device=device, dtype=dtype)
PROMOTION_MAP = {
torch.int8: torch.complex64,
torch.half: torch.complex32,
torch.float: torch.complex64,
torch.double: torch.complex128,
torch.complex32: torch.complex32,
torch.complex64: torch.complex64,
torch.complex128: torch.complex128,
}
T = torch.fft.fft(t)
self.assertEqual(T.dtype, PROMOTION_MAP[dtype])
PROMOTION_MAP_C2R = {
torch.int8: torch.float,
torch.half: torch.half,
torch.float: torch.float,
torch.double: torch.double,
torch.complex32: torch.half,
torch.complex64: torch.float,
torch.complex128: torch.double,
}
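# hfft performs a complex-to-real transform, so complex inputs map to their real
# counterparts and integer inputs promote to the default float dtype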
if dtype in (torch.half, torch.complex32):
# cuFFT supports powers of 2 for half and complex half precision
# NOTE: With hfft and default args where output_size n=2*(input_size - 1),
# we make sure that logical fft size is a power of two.
x = torch.randn(65, device=device, dtype=dtype)
R = torch.fft.hfft(x)
else:
R = torch.fft.hfft(t)
self.assertEqual(R.dtype, PROMOTION_MAP_C2R[dtype])
if not dtype.is_complex:
PROMOTION_MAP_R2C = {
torch.int8: torch.complex64,
torch.half: torch.complex32,
torch.float: torch.complex64,
torch.double: torch.complex128,
}
C = torch.fft.rfft(t)
self.assertEqual(C.dtype, PROMOTION_MAP_R2C[dtype])
@onlyNativeDeviceTypes
@ops(spectral_funcs, dtypes=OpDTypes.unsupported,
allowed_dtypes=[torch.half, torch.bfloat16])
def test_fft_half_and_bfloat16_errors(self, device, dtype, op):
# TODO: Remove torch.half error when complex32 is fully implemented
sample = first_sample(self, op.sample_inputs(device, dtype))
device_type = torch.device(device).type
default_msg = "Unsupported dtype"
if dtype is torch.half and device_type == 'cuda' and TEST_WITH_ROCM:
err_msg = default_msg
elif dtype is torch.half and device_type == 'cuda' and not SM53OrLater:
err_msg = "cuFFT doesn't support signals of half type with compute capability less than SM_53"
else:
err_msg = default_msg
with self.assertRaisesRegex(RuntimeError, err_msg):
op(sample.input, *sample.args, **sample.kwargs)
@onlyNativeDeviceTypes
@ops(spectral_funcs, allowed_dtypes=(torch.half, torch.chalf))
def test_fft_half_and_chalf_not_power_of_two_error(self, device, dtype, op):
t = make_tensor(13, 13, device=device, dtype=dtype)
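# Neither 13 nor the explicit size 12 is a power of two, so both calls below should be
# rejected by the half-precision cuFFT path.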
err_msg = "cuFFT only supports dimensions whose sizes are powers of two"
with self.assertRaisesRegex(RuntimeError, err_msg):
op(t)
if op.ndimensional in (SpectralFuncType.ND, SpectralFuncType.TwoD):
kwargs = {'s': (12, 12)}
else:
kwargs = {'n': 12}
with self.assertRaisesRegex(RuntimeError, err_msg):
op(t, **kwargs)
# nd-fft tests
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@ops([op for op in spectral_funcs if op.ndimensional == SpectralFuncType.ND],
allowed_dtypes=(torch.cfloat, torch.cdouble))
def test_reference_nd(self, device, dtype, op):
if op.ref is None:
raise unittest.SkipTest("No reference implementation")
norm_modes = REFERENCE_NORM_MODES
# input_ndim, s, dim
transform_desc = [
*product(range(2, 5), (None,), (None, (0,), (0, -1))),
*product(range(2, 5), (None, (4, 10)), (None,)),
(6, None, None),
(5, None, (1, 3, 4)),
(3, None, (1,)),
(1, None, (0,)),
(4, (10, 10), None),
(4, (10, 10), (0, 1))
]
for input_ndim, s, dim in transform_desc:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
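# cycling through sizes 4..8 gives each dimension a distinct extent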
input = torch.randn(*shape, device=device, dtype=dtype)
for norm in norm_modes:
expected = op.ref(input.cpu().numpy(), s, dim, norm)
exact_dtype = dtype in (torch.double, torch.complex128)
actual = op(input, s, dim, norm)
self.assertEqual(actual, expected, exact_dtype=exact_dtype)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
torch.chalf : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double,
torch.complex32, torch.complex64, torch.complex128)
def test_fftn_round_trip(self, device, dtype):
skip_helper_for_fft(device, dtype)
norm_modes = (None, "forward", "backward", "ortho")
# input_ndim, dim
transform_desc = [
*product(range(2, 5), (None, (0,), (0, -1))),
(7, None),
(5, (1, 3, 4)),
(3, (1,)),
(1, 0),
]
fft_functions = [(torch.fft.fftn, torch.fft.ifftn)]
# Real-only functions
if not dtype.is_complex:
# NOTE: Using ihfftn as "forward" transform to avoid needing to
# generate true half-complex input
fft_functions += [(torch.fft.rfftn, torch.fft.irfftn),
(torch.fft.ihfftn, torch.fft.hfftn)]
for input_ndim, dim in transform_desc:
if dtype in (torch.half, torch.complex32):
# cuFFT supports powers of 2 for half and complex half precision
shape = itertools.islice(itertools.cycle((2, 4, 8)), input_ndim)
else:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
x = torch.randn(*shape, device=device, dtype=dtype)
for (forward, backward), norm in product(fft_functions, norm_modes):
if isinstance(dim, tuple):
s = [x.size(d) for d in dim]
else:
s = x.size() if dim is None else x.size(dim)
kwargs = {'s': s, 'dim': dim, 'norm': norm}
y = backward(forward(x, **kwargs), **kwargs)
# For real input, ifftn(fftn(x)) will convert to complex
if x.dtype is torch.half and y.dtype is torch.chalf:
# Since type promotion currently doesn't work with complex32
# manually promote `x` to complex32
self.assertEqual(x.to(torch.chalf), y)
else:
self.assertEqual(x, y, exact_dtype=(
forward != torch.fft.fftn or x.is_complex()))
@onlyNativeDeviceTypes
@ops([op for op in spectral_funcs if op.ndimensional == SpectralFuncType.ND],
allowed_dtypes=[torch.float, torch.cfloat])
def test_fftn_invalid(self, device, dtype, op):
a = torch.rand(10, 10, 10, device=device, dtype=dtype)
# FIXME: https://github.com/pytorch/pytorch/issues/108205
errMsg = "dims must be unique"
with self.assertRaisesRegex(RuntimeError, errMsg):
op(a, dim=(0, 1, 0))
with self.assertRaisesRegex(RuntimeError, errMsg):
op(a, dim=(2, -1))
with self.assertRaisesRegex(RuntimeError, "dim and shape .* same length"):
op(a, s=(1,), dim=(0, 1))
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
op(a, dim=(3,))
with self.assertRaisesRegex(RuntimeError, "tensor only has 3 dimensions"):
op(a, s=(10, 10, 10, 10))
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.half, torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_fftn_noop_transform(self, device, dtype):
skip_helper_for_fft(device, dtype)
RESULT_TYPE = {
torch.half: torch.chalf,
torch.float: torch.cfloat,
torch.double: torch.cdouble,
}
for op in [
torch.fft.fftn,
torch.fft.ifftn,
torch.fft.fft2,
torch.fft.ifft2,
]:
inp = make_tensor((10, 10), device=device, dtype=dtype)
out = op(inp, dim=[])
expect_dtype = RESULT_TYPE.get(inp.dtype, inp.dtype)
expect = inp.to(expect_dtype)
self.assertEqual(expect, out)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double)
def test_hfftn(self, device, dtype):
skip_helper_for_fft(device, dtype)
# input_ndim, dim
transform_desc = [
*product(range(2, 5), (None, (0,), (0, -1))),
(6, None),
(5, (1, 3, 4)),
(3, (1,)),
(1, (0,)),
(4, (0, 1))
]
for input_ndim, dim in transform_desc:
actual_dims = list(range(input_ndim)) if dim is None else dim
if dtype is torch.half:
shape = tuple(itertools.islice(itertools.cycle((2, 4, 8)), input_ndim))
else:
shape = tuple(itertools.islice(itertools.cycle(range(4, 9)), input_ndim))
expect = torch.randn(*shape, device=device, dtype=dtype)
input = torch.fft.ifftn(expect, dim=dim, norm="ortho")
lastdim = actual_dims[-1]
lastdim_size = input.size(lastdim) // 2 + 1
idx = [slice(None)] * input_ndim
idx[lastdim] = slice(0, lastdim_size)
input = input[idx]
s = [shape[dim] for dim in actual_dims]
actual = torch.fft.hfftn(input, s=s, dim=dim, norm="ortho")
self.assertEqual(expect, actual)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@toleranceOverride({
torch.half : tol(1e-2, 1e-2),
})
@dtypes(torch.half, torch.float, torch.double)
def test_ihfftn(self, device, dtype):
skip_helper_for_fft(device, dtype)
# input_ndim, dim
transform_desc = [
*product(range(2, 5), (None, (0,), (0, -1))),
(6, None),
(5, (1, 3, 4)),
(3, (1,)),
(1, (0,)),
(4, (0, 1))
]
for input_ndim, dim in transform_desc:
if dtype is torch.half:
shape = tuple(itertools.islice(itertools.cycle((2, 4, 8)), input_ndim))
else:
shape = tuple(itertools.islice(itertools.cycle(range(4, 9)), input_ndim))
input = torch.randn(*shape, device=device, dtype=dtype)
expect = torch.fft.ifftn(input, dim=dim, norm="ortho")
# Slice off the half-symmetric component
lastdim = -1 if dim is None else dim[-1]
lastdim_size = expect.size(lastdim) // 2 + 1
idx = [slice(None)] * input_ndim
idx[lastdim] = slice(0, lastdim_size)
expect = expect[idx]
actual = torch.fft.ihfftn(input, dim=dim, norm="ortho")
self.assertEqual(expect, actual)
# 2d-fft tests
# NOTE: 2d transforms are only thin wrappers over n-dim transforms,
# so don't require exhaustive testing.
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double, torch.complex128)
def test_fft2_numpy(self, device, dtype):
norm_modes = REFERENCE_NORM_MODES
# input_ndim, s
transform_desc = [
*product(range(2, 5), (None, (4, 10))),
]
fft_functions = ['fft2', 'ifft2', 'irfft2', 'hfft2']
if dtype.is_floating_point:
fft_functions += ['rfft2', 'ihfft2']
for input_ndim, s in transform_desc:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
input = torch.randn(*shape, device=device, dtype=dtype)
for fname, norm in product(fft_functions, norm_modes):
torch_fn = getattr(torch.fft, fname)
if "hfft" in fname:
if not has_scipy_fft:
continue # Requires scipy to compare against
numpy_fn = getattr(scipy.fft, fname)
else:
numpy_fn = getattr(np.fft, fname)
def fn(t: torch.Tensor, s: Optional[List[int]], dim: List[int] = (-2, -1), norm: Optional[str] = None):
return torch_fn(t, s, dim, norm)
torch_fns = (torch_fn, torch.jit.script(fn))
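# exercise both the eager function and its TorchScript-compiled wrapper below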
# Once with dim defaulted
input_np = input.cpu().numpy()
expected = numpy_fn(input_np, s, norm=norm)
for fn in torch_fns:
actual = fn(input, s, norm=norm)
self.assertEqual(actual, expected)
# Once with explicit dims
dim = (1, 0)
expected = numpy_fn(input_np, s, dim, norm)
for fn in torch_fns:
actual = fn(input, s, dim, norm)
self.assertEqual(actual, expected)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.complex64)
def test_fft2_fftn_equivalence(self, device, dtype):
norm_modes = (None, "forward", "backward", "ortho")
# input_ndim, s, dim
transform_desc = [
*product(range(2, 5), (None, (4, 10)), (None, (1, 0))),
(3, None, (0, 2)),
]
fft_functions = ['fft', 'ifft', 'irfft', 'hfft']
# Real-only functions
if dtype.is_floating_point:
fft_functions += ['rfft', 'ihfft']
for input_ndim, s, dim in transform_desc:
shape = itertools.islice(itertools.cycle(range(4, 9)), input_ndim)
x = torch.randn(*shape, device=device, dtype=dtype)
for func, norm in product(fft_functions, norm_modes):
f2d = getattr(torch.fft, func + '2')
fnd = getattr(torch.fft, func + 'n')
kwargs = {'s': s, 'norm': norm}
if dim is not None:
kwargs['dim'] = dim
expect = fnd(x, **kwargs)
else:
expect = fnd(x, dim=(-2, -1), **kwargs)
actual = f2d(x, **kwargs)
self.assertEqual(actual, expect)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
def test_fft2_invalid(self, device):
a = torch.rand(10, 10, 10, device=device)
fft_funcs = (torch.fft.fft2, torch.fft.ifft2,
torch.fft.rfft2, torch.fft.irfft2)
for func in fft_funcs:
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
func(a, dim=(0, 0))
with self.assertRaisesRegex(RuntimeError, "dims must be unique"):
func(a, dim=(2, -1))
with self.assertRaisesRegex(RuntimeError, "dim and shape .* same length"):
func(a, s=(1,))
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
func(a, dim=(2, 3))
c = torch.complex(a, a)
with self.assertRaisesRegex(RuntimeError, "rfftn expects a real-valued input"):
torch.fft.rfft2(c)
# Helper functions
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@dtypes(torch.float, torch.double)
def test_fftfreq_numpy(self, device, dtype):
test_args = [
*product(
# n
range(1, 20),
# d
(None, 10.0),
)
]
functions = ['fftfreq', 'rfftfreq']
for fname in functions:
torch_fn = getattr(torch.fft, fname)
numpy_fn = getattr(np.fft, fname)
for n, d in test_args:
args = (n,) if d is None else (n, d)
expected = numpy_fn(*args)
actual = torch_fn(*args, device=device, dtype=dtype)
self.assertEqual(actual, expected, exact_dtype=False)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_fftfreq_out(self, device, dtype):
for func in (torch.fft.fftfreq, torch.fft.rfftfreq):
expect = func(n=100, d=.5, device=device, dtype=dtype)
actual = torch.empty((), device=device, dtype=dtype)
with self.assertWarnsRegex(UserWarning, "out tensor will be resized"):
func(n=100, d=.5, out=actual)
self.assertEqual(actual, expect)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_fftshift_numpy(self, device, dtype):
test_args = [
# shape, dim
*product(((11,), (12,)), (None, 0, -1)),
*product(((4, 5), (6, 6)), (None, 0, (-1,))),
*product(((1, 1, 4, 6, 7, 2),), (None, (3, 4))),
]
functions = ['fftshift', 'ifftshift']
for shape, dim in test_args:
input = torch.rand(*shape, device=device, dtype=dtype)
input_np = input.cpu().numpy()
for fname in functions:
torch_fn = getattr(torch.fft, fname)
numpy_fn = getattr(np.fft, fname)
expected = numpy_fn(input_np, axes=dim)
actual = torch_fn(input, dim=dim)
self.assertEqual(actual, expected)
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
@dtypes(torch.float, torch.double)
def test_fftshift_frequencies(self, device, dtype):
for n in range(10, 15):
sorted_fft_freqs = torch.arange(-(n // 2), n - (n // 2),
device=device, dtype=dtype)
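# With d = 1/n, fftfreq returns the integer frequencies 0..ceil(n/2)-1, -(n//2)..-1,
# so sorting them should reproduce exactly the arange above.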
x = torch.fft.fftfreq(n, d=1 / n, device=device, dtype=dtype)
# Test fftshift sorts the fftfreq output
shifted = torch.fft.fftshift(x)
self.assertEqual(shifted, shifted.sort().values)
self.assertEqual(sorted_fft_freqs, shifted)
# And ifftshift is the inverse
self.assertEqual(x, torch.fft.ifftshift(shifted))
# Legacy fft tests
def _test_fft_ifft_rfft_irfft(self, device, dtype):
complex_dtype = corresponding_complex_dtype(dtype)
def _test_complex(sizes, signal_ndim, prepro_fn=lambda x: x):
x = prepro_fn(torch.randn(*sizes, dtype=complex_dtype, device=device))
dim = tuple(range(-signal_ndim, 0))
for norm in ('ortho', None):
res = torch.fft.fftn(x, dim=dim, norm=norm)
rec = torch.fft.ifftn(res, dim=dim, norm=norm)
self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='fft and ifft')
res = torch.fft.ifftn(x, dim=dim, norm=norm)
rec = torch.fft.fftn(res, dim=dim, norm=norm)
self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='ifft and fft')
def _test_real(sizes, signal_ndim, prepro_fn=lambda x: x):
x = prepro_fn(torch.randn(*sizes, dtype=dtype, device=device))
signal_numel = 1
signal_sizes = x.size()[-signal_ndim:]
dim = tuple(range(-signal_ndim, 0))
for norm in (None, 'ortho'):
res = torch.fft.rfftn(x, dim=dim, norm=norm)
rec = torch.fft.irfftn(res, s=signal_sizes, dim=dim, norm=norm)
self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='rfft and irfft')
res = torch.fft.fftn(x, dim=dim, norm=norm)
rec = torch.fft.ifftn(res, dim=dim, norm=norm)
x_complex = torch.complex(x, torch.zeros_like(x))
self.assertEqual(x_complex, rec, atol=1e-8, rtol=0, msg='fft and ifft (from real)')
# contiguous case
_test_real((100,), 1)
_test_real((10, 1, 10, 100), 1)
_test_real((100, 100), 2)
_test_real((2, 2, 5, 80, 60), 2)
_test_real((50, 40, 70), 3)
_test_real((30, 1, 50, 25, 20), 3)
_test_complex((100,), 1)
_test_complex((100, 100), 1)
_test_complex((100, 100), 2)
_test_complex((1, 20, 80, 60), 2)
_test_complex((50, 40, 70), 3)
_test_complex((6, 5, 50, 25, 20), 3)
# non-contiguous case
_test_real((165,), 1, lambda x: x.narrow(0, 25, 100)) # input is not aligned to complex type
_test_real((100, 100, 3), 1, lambda x: x[:, :, 0])
_test_real((100, 100), 2, lambda x: x.t())
_test_real((20, 100, 10, 10), 2, lambda x: x.view(20, 100, 100)[:, :60])
_test_real((65, 80, 115), 3, lambda x: x[10:60, 13:53, 10:80])
_test_real((30, 20, 50, 25), 3, lambda x: x.transpose(1, 2).transpose(2, 3))
_test_complex((100,), 1, lambda x: x.expand(100, 100))
_test_complex((20, 90, 110), 2, lambda x: x[:, 5:85].narrow(2, 5, 100))
_test_complex((40, 60, 3, 80), 3, lambda x: x.transpose(2, 0).select(0, 2)[5:55, :, 10:])
_test_complex((30, 55, 50, 22), 3, lambda x: x[:, 3:53, 15:40, 1:21])
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_fft_ifft_rfft_irfft(self, device, dtype):
self._test_fft_ifft_rfft_irfft(device, dtype)
@deviceCountAtLeast(1)
@onlyCUDA
@dtypes(torch.double)
def test_cufft_plan_cache(self, devices, dtype):
@contextmanager
def plan_cache_max_size(device, n):
if device is None:
plan_cache = torch.backends.cuda.cufft_plan_cache
else:
plan_cache = torch.backends.cuda.cufft_plan_cache[device]
original = plan_cache.max_size
plan_cache.max_size = n
try:
yield
finally:
plan_cache.max_size = original
with plan_cache_max_size(devices[0], max(1, torch.backends.cuda.cufft_plan_cache.size - 10)):
self._test_fft_ifft_rfft_irfft(devices[0], dtype)
with plan_cache_max_size(devices[0], 0):
self._test_fft_ifft_rfft_irfft(devices[0], dtype)
torch.backends.cuda.cufft_plan_cache.clear()
# check that it still works after clearing the cache
with plan_cache_max_size(devices[0], 10):
self._test_fft_ifft_rfft_irfft(devices[0], dtype)
with self.assertRaisesRegex(RuntimeError, r"must be non-negative"):
torch.backends.cuda.cufft_plan_cache.max_size = -1
with self.assertRaisesRegex(RuntimeError, r"read-only property"):
torch.backends.cuda.cufft_plan_cache.size = -1
with self.assertRaisesRegex(RuntimeError, r"but got device with index"):
torch.backends.cuda.cufft_plan_cache[torch.cuda.device_count() + 10]
# Multigpu tests
if len(devices) > 1:
# Test that different GPU has different cache
x0 = torch.randn(2, 3, 3, device=devices[0])
x1 = x0.to(devices[1])
self.assertEqual(torch.fft.rfftn(x0, dim=(-2, -1)), torch.fft.rfftn(x1, dim=(-2, -1)))
# If a plan is used across different devices, the following line (or
# the assert above) would trigger illegal memory access. Other ways
# to trigger the error include
# (1) setting CUDA_LAUNCH_BLOCKING=1 (pytorch/pytorch#19224) and
# (2) printing a device 1 tensor.
x0.copy_(x1)
# Test that un-indexed `torch.backends.cuda.cufft_plan_cache` uses current device
with plan_cache_max_size(devices[0], 10):
with plan_cache_max_size(devices[1], 11):
self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
self.assertEqual(torch.backends.cuda.cufft_plan_cache[1].max_size, 11)
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10) # default is cuda:0
with torch.cuda.device(devices[1]):
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11) # default is cuda:1
with torch.cuda.device(devices[0]):
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10) # default is cuda:0
self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
with torch.cuda.device(devices[1]):
with plan_cache_max_size(None, 11): # default is cuda:1
self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
self.assertEqual(torch.backends.cuda.cufft_plan_cache[1].max_size, 11)
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11) # default is cuda:1
with torch.cuda.device(devices[0]):
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10) # default is cuda:0
self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11) # default is cuda:1
@onlyCUDA
@dtypes(torch.cfloat, torch.cdouble)
def test_cufft_context(self, device, dtype):
# Regression test for https://github.com/pytorch/pytorch/issues/109448
x = torch.randn(32, dtype=dtype, device=device, requires_grad=True)
dout = torch.zeros(32, dtype=dtype, device=device)
# compute iFFT(FFT(x))
out = torch.fft.ifft(torch.fft.fft(x))
out.backward(dout, retain_graph=True)
dx = torch.fft.fft(torch.fft.ifft(dout))
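# ifft(fft(x)) is the identity, so x.grad should equal dout, computed explicitly here as
# fft(ifft(dout)); the second check below confirms the gradient differs from the input itself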
self.assertTrue((x.grad - dx).abs().max() == 0)
self.assertFalse((x.grad - x).abs().max() == 0)
# passes on ROCm w/ python 2.7, fails w/ python 3.6
@skipIfTorchDynamo("cannot set WRITEABLE flag to True of this array")
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_stft(self, device, dtype):
if not TEST_LIBROSA:
raise unittest.SkipTest('librosa not found')
def librosa_stft(x, n_fft, hop_length, win_length, window, center):
if window is None:
window = np.ones(n_fft if win_length is None else win_length)
else:
window = window.cpu().numpy()
input_1d = x.dim() == 1
if input_1d:
x = x.view(1, -1)
# NOTE: librosa 0.9 changed default pad_mode to 'constant' (zero padding)
# however, we use the pre-0.9 default ('reflect')
pad_mode = 'reflect'
result = []
for xi in x:
ri = librosa.stft(xi.cpu().numpy(), n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center,
pad_mode=pad_mode)
result.append(torch.from_numpy(np.stack([ri.real, ri.imag], -1)))
result = torch.stack(result, 0)
if input_1d:
result = result[0]
return result
def _test(sizes, n_fft, hop_length=None, win_length=None, win_sizes=None,
center=True, expected_error=None):
x = torch.randn(*sizes, dtype=dtype, device=device)
if win_sizes is not None:
window = torch.randn(*win_sizes, dtype=dtype, device=device)
else:
window = None
if expected_error is None:
result = x.stft(n_fft, hop_length, win_length, window,
center=center, return_complex=False)
# NB: librosa defaults to np.complex64 output, no matter what
# the input dtype
ref_result = librosa_stft(x, n_fft, hop_length, win_length, window, center)
self.assertEqual(result, ref_result, atol=7e-6, rtol=0, msg='stft comparison against librosa', exact_dtype=False)
# With return_complex=True, the result is the same but viewed as complex instead of real
result_complex = x.stft(n_fft, hop_length, win_length, window, center=center, return_complex=True)
self.assertEqual(result_complex, torch.view_as_complex(result))
else:
self.assertRaises(expected_error,
lambda: x.stft(n_fft, hop_length, win_length, window, center=center))
for center in [True, False]:
_test((10,), 7, center=center)
_test((10, 4000), 1024, center=center)
_test((10,), 7, 2, center=center)
_test((10, 4000), 1024, 512, center=center)
_test((10,), 7, 2, win_sizes=(7,), center=center)
_test((10, 4000), 1024, 512, win_sizes=(1024,), center=center)
# spectral oversample
_test((10,), 7, 2, win_length=5, center=center)
_test((10, 4000), 1024, 512, win_length=100, center=center)
_test((10, 4, 2), 1, 1, expected_error=RuntimeError)
_test((10,), 11, 1, center=False, expected_error=RuntimeError)
_test((10,), -1, 1, expected_error=RuntimeError)
_test((10,), 3, win_length=5, expected_error=RuntimeError)
_test((10,), 5, 4, win_sizes=(11,), expected_error=RuntimeError)
_test((10,), 5, 4, win_sizes=(1, 1), expected_error=RuntimeError)
@skipIfTorchDynamo("double")
@skipCPUIfNoFFT
@onlyNativeDeviceTypes
@dtypes(torch.double)
def test_istft_against_librosa(self, device, dtype):
if not TEST_LIBROSA:
raise unittest.SkipTest('librosa not found')
def librosa_istft(x, n_fft, hop_length, win_length, window, length, center):
if window is None:
window = np.ones(n_fft if win_length is None else win_length)
else:
window = window.cpu().numpy()
return librosa.istft(x.cpu().numpy(), n_fft=n_fft, hop_length=hop_length,
win_length=win_length, length=length, window=window, center=center)
def _test(size, n_fft, hop_length=None, win_length=None, win_sizes=None,
length=None, center=True):
x = torch.randn(size, dtype=dtype, device=device)
if win_sizes is not None:
window = torch.randn(*win_sizes, dtype=dtype, device=device)
else:
window = None
x_stft = x.stft(n_fft, hop_length, win_length, window, center=center,
onesided=True, return_complex=True)
ref_result = librosa_istft(x_stft, n_fft, hop_length, win_length,
window, length, center)
result = x_stft.istft(n_fft, hop_length, win_length, window,
length=length, center=center)
self.assertEqual(result, ref_result)
for center in [True, False]:
_test(10, 7, center=center)
_test(4000, 1024, center=center)
_test(4000, 1024, center=center, length=4000)
_test(10, 7, 2, center=center)
_test(4000, 1024, 512, center=center)
_test(4000, 1024, 512, center=center, length=4000)
_test(10, 7, 2, win_sizes=(7,), center=center)
_test(4000, 1024, 512, win_sizes=(1024,), center=center)
_test(4000, 1024, 512, win_sizes=(1024,), center=center, length=4000)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double, torch.cdouble)
def test_complex_stft_roundtrip(self, device, dtype):
test_args = list(product(
# input
(torch.randn(600, device=device, dtype=dtype),
torch.randn(807, device=device, dtype=dtype),
torch.randn(12, 60, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(None, 10),
# center
(True,),
# pad_mode
("constant", "reflect", "circular"),
# normalized
(True, False),
# onesided
(True, False) if not dtype.is_complex else (False,),
))
for args in test_args:
x, n_fft, hop_length, center, pad_mode, normalized, onesided = args
common_kwargs = {
'n_fft': n_fft, 'hop_length': hop_length, 'center': center,
'normalized': normalized, 'onesided': onesided,
}
# Functional interface
x_stft = torch.stft(x, pad_mode=pad_mode, return_complex=True, **common_kwargs)
x_roundtrip = torch.istft(x_stft, return_complex=dtype.is_complex,
length=x.size(-1), **common_kwargs)
self.assertEqual(x_roundtrip, x)
# Tensor method interface
x_stft = x.stft(pad_mode=pad_mode, return_complex=True, **common_kwargs)
x_roundtrip = torch.istft(x_stft, return_complex=dtype.is_complex,
length=x.size(-1), **common_kwargs)
self.assertEqual(x_roundtrip, x)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double, torch.cdouble)
def test_stft_roundtrip_complex_window(self, device, dtype):
test_args = list(product(
# input
(torch.randn(600, device=device, dtype=dtype),
torch.randn(807, device=device, dtype=dtype),
torch.randn(12, 60, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(None, 10),
# pad_mode
("constant", "reflect", "replicate", "circular"),
# normalized
(True, False),
))
for args in test_args:
x, n_fft, hop_length, pad_mode, normalized = args
window = torch.rand(n_fft, device=device, dtype=torch.cdouble)
x_stft = torch.stft(
x, n_fft=n_fft, hop_length=hop_length, window=window,
center=True, pad_mode=pad_mode, normalized=normalized)
self.assertEqual(x_stft.dtype, torch.cdouble)
self.assertEqual(x_stft.size(-2), n_fft) # Not onesided
x_roundtrip = torch.istft(
x_stft, n_fft=n_fft, hop_length=hop_length, window=window,
center=True, normalized=normalized, length=x.size(-1),
return_complex=True)
self.assertEqual(x_stft.dtype, torch.cdouble)
if not dtype.is_complex:
self.assertEqual(x_roundtrip.imag, torch.zeros_like(x_roundtrip.imag),
atol=1e-6, rtol=0)
self.assertEqual(x_roundtrip.real, x)
else:
self.assertEqual(x_roundtrip, x)
@skipCPUIfNoFFT
@dtypes(torch.cdouble)
def test_complex_stft_definition(self, device, dtype):
test_args = list(product(
# input
(torch.randn(600, device=device, dtype=dtype),
torch.randn(807, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(10, 15)
))
for args in test_args:
window = torch.randn(args[1], device=device, dtype=dtype)
expected = _stft_reference(args[0], args[2], window)
actual = torch.stft(*args, window=window, center=False)
self.assertEqual(actual, expected)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.cdouble)
def test_complex_stft_real_equiv(self, device, dtype):
test_args = list(product(
# input
(torch.rand(600, device=device, dtype=dtype),
torch.rand(807, device=device, dtype=dtype),
torch.rand(14, 50, device=device, dtype=dtype),
torch.rand(6, 51, device=device, dtype=dtype)),
# n_fft
(50, 27),
# hop_length
(None, 10),
# win_length
(None, 20),
# center
(False, True),
# pad_mode
("constant", "reflect", "circular"),
# normalized
(True, False),
))
for args in test_args:
x, n_fft, hop_length, win_length, center, pad_mode, normalized = args
expected = _complex_stft(x, n_fft, hop_length=hop_length,
win_length=win_length, pad_mode=pad_mode,
center=center, normalized=normalized)
actual = torch.stft(x, n_fft, hop_length=hop_length,
win_length=win_length, pad_mode=pad_mode,
center=center, normalized=normalized)
self.assertEqual(expected, actual)
@skipCPUIfNoFFT
@dtypes(torch.cdouble)
def test_complex_istft_real_equiv(self, device, dtype):
test_args = list(product(
# input
(torch.rand(40, 20, device=device, dtype=dtype),
torch.rand(25, 1, device=device, dtype=dtype),
torch.rand(4, 20, 10, device=device, dtype=dtype)),
# hop_length
(None, 10),
# center
(False, True),
# normalized
(True, False),
))
for args in test_args:
x, hop_length, center, normalized = args
n_fft = x.size(-2)
expected = _complex_istft(x, n_fft, hop_length=hop_length,
center=center, normalized=normalized)
actual = torch.istft(x, n_fft, hop_length=hop_length,
center=center, normalized=normalized,
return_complex=True)
self.assertEqual(expected, actual)
@skipCPUIfNoFFT
def test_complex_stft_onesided(self, device):
# stft of complex input cannot be onesided
for x_dtype, window_dtype in product((torch.double, torch.cdouble), repeat=2):
x = torch.rand(100, device=device, dtype=x_dtype)
window = torch.rand(10, device=device, dtype=window_dtype)
if x_dtype.is_complex or window_dtype.is_complex:
with self.assertRaisesRegex(RuntimeError, 'complex'):
x.stft(10, window=window, pad_mode='constant', onesided=True)
else:
y = x.stft(10, window=window, pad_mode='constant', onesided=True,
return_complex=True)
self.assertEqual(y.dtype, torch.cdouble)
self.assertEqual(y.size(), (6, 51))
x = torch.rand(100, device=device, dtype=torch.cdouble)
with self.assertRaisesRegex(RuntimeError, 'complex'):
x.stft(10, pad_mode='constant', onesided=True)
# stft currently warns that it requires return_complex while an upgrader is written
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_stft_requires_complex(self, device):
x = torch.rand(100)
with self.assertRaisesRegex(RuntimeError, 'stft requires the return_complex parameter'):
y = x.stft(10, pad_mode='constant')
# stft and istft are currently warning if a window is not provided
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_stft_requires_window(self, device):
x = torch.rand(100)
with self.assertWarnsOnceRegex(UserWarning, "A window was not provided"):
y = x.stft(10, pad_mode='constant', return_complex=True)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_istft_requires_window(self, device):
stft = torch.rand((51, 5), dtype=torch.cdouble)
# 51 = 2 * n_fft + 1, 5 = number of frames
with self.assertWarnsOnceRegex(UserWarning, "A window was not provided"):
x = torch.istft(stft, n_fft=100, length=100)
@skipCPUIfNoFFT
def test_fft_input_modification(self, device):
# FFT functions should not modify their input (gh-34551)
signal = torch.ones((2, 2, 2), device=device)
signal_copy = signal.clone()
spectrum = torch.fft.fftn(signal, dim=(-2, -1))
self.assertEqual(signal, signal_copy)
spectrum_copy = spectrum.clone()
_ = torch.fft.ifftn(spectrum, dim=(-2, -1))
self.assertEqual(spectrum, spectrum_copy)
half_spectrum = torch.fft.rfftn(signal, dim=(-2, -1))
self.assertEqual(signal, signal_copy)
half_spectrum_copy = half_spectrum.clone()
_ = torch.fft.irfftn(half_spectrum_copy, s=(2, 2), dim=(-2, -1))
self.assertEqual(half_spectrum, half_spectrum_copy)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_fft_plan_repeatable(self, device):
# Regression test for gh-58724 and gh-63152
for n in [2048, 3199, 5999]:
a = torch.randn(n, device=device, dtype=torch.complex64)
res1 = torch.fft.fftn(a)
res2 = torch.fft.fftn(a.clone())
self.assertEqual(res1, res2)
a = torch.randn(n, device=device, dtype=torch.float64)
res1 = torch.fft.rfft(a)
res2 = torch.fft.rfft(a.clone())
self.assertEqual(res1, res2)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_round_trip_simple_cases(self, device, dtype):
"""stft -> istft should recover the original signale"""
def _test(input, n_fft, length):
stft = torch.stft(input, n_fft=n_fft, return_complex=True)
inverse = torch.istft(stft, n_fft=n_fft, length=length)
self.assertEqual(input, inverse, exact_dtype=True)
_test(torch.ones(4, dtype=dtype, device=device), 4, 4)
_test(torch.zeros(4, dtype=dtype, device=device), 4, 4)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_round_trip_various_params(self, device, dtype):
"""stft -> istft should recover the original signale"""
def _test_istft_is_inverse_of_stft(stft_kwargs):
            # generates a random sound signal for each trial and then does the stft/istft
            # operation to check whether we can reconstruct the signal
data_sizes = [(2, 20), (3, 15), (4, 10)]
num_trials = 100
istft_kwargs = stft_kwargs.copy()
del istft_kwargs['pad_mode']
for sizes in data_sizes:
for i in range(num_trials):
original = torch.randn(*sizes, dtype=dtype, device=device)
stft = torch.stft(original, return_complex=True, **stft_kwargs)
inversed = torch.istft(stft, length=original.size(1), **istft_kwargs)
self.assertEqual(
inversed, original, msg='istft comparison against original',
atol=7e-6, rtol=0, exact_dtype=True)
patterns = [
# hann_window, centered, normalized, onesided
{
'n_fft': 12,
'hop_length': 4,
'win_length': 12,
'window': torch.hann_window(12, dtype=dtype, device=device),
'center': True,
'pad_mode': 'reflect',
'normalized': True,
'onesided': True,
},
# hann_window, centered, not normalized, not onesided
{
'n_fft': 12,
'hop_length': 2,
'win_length': 8,
'window': torch.hann_window(8, dtype=dtype, device=device),
'center': True,
'pad_mode': 'reflect',
'normalized': False,
'onesided': False,
},
# hamming_window, centered, normalized, not onesided
{
'n_fft': 15,
'hop_length': 3,
'win_length': 11,
'window': torch.hamming_window(11, dtype=dtype, device=device),
'center': True,
'pad_mode': 'constant',
'normalized': True,
'onesided': False,
},
# hamming_window, centered, not normalized, onesided
# window same size as n_fft
{
'n_fft': 5,
'hop_length': 2,
'win_length': 5,
'window': torch.hamming_window(5, dtype=dtype, device=device),
'center': True,
'pad_mode': 'constant',
'normalized': False,
'onesided': True,
},
]
for i, pattern in enumerate(patterns):
_test_istft_is_inverse_of_stft(pattern)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_round_trip_with_padding(self, device, dtype):
"""long hop_length or not centered may cause length mismatch in the inversed signal"""
def _test_istft_is_inverse_of_stft_with_padding(stft_kwargs):
            # generates a random sound signal for each trial and then does the stft/istft
            # operation to check whether we can reconstruct the signal
num_trials = 100
sizes = stft_kwargs['size']
del stft_kwargs['size']
istft_kwargs = stft_kwargs.copy()
del istft_kwargs['pad_mode']
for i in range(num_trials):
original = torch.randn(*sizes, dtype=dtype, device=device)
stft = torch.stft(original, return_complex=True, **stft_kwargs)
with self.assertWarnsOnceRegex(UserWarning, "The length of signal is shorter than the length parameter."):
inversed = torch.istft(stft, length=original.size(-1), **istft_kwargs)
n_frames = stft.size(-1)
if stft_kwargs["center"] is True:
len_expected = stft_kwargs["n_fft"] // 2 + stft_kwargs["hop_length"] * (n_frames - 1)
else:
len_expected = stft_kwargs["n_fft"] + stft_kwargs["hop_length"] * (n_frames - 1)
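                # len_expected is the number of input samples actually covered by the STFT
                # frames (for center=True the n_fft // 2 padding on each side is already
                # accounted for); samples beyond it cannot be reconstructed, so istft
                # zero-fills them when a longer `length` is requested, which the padding
                # check below verifies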
                # trim the original for the case when the reconstructed signal is shorter than the original
padding = inversed[..., len_expected:]
inversed = inversed[..., :len_expected]
original = original[..., :len_expected]
                # test that the padding region of the reconstructed signal is all zeros
zeros = torch.zeros_like(padding, device=padding.device)
self.assertEqual(
padding, zeros, msg='istft padding values against zeros',
atol=7e-6, rtol=0, exact_dtype=True)
self.assertEqual(
inversed, original, msg='istft comparison against original',
atol=7e-6, rtol=0, exact_dtype=True)
patterns = [
# hamming_window, not centered, not normalized, not onesided
# window same size as n_fft
{
'size': [2, 20],
'n_fft': 3,
'hop_length': 2,
'win_length': 3,
'window': torch.hamming_window(3, dtype=dtype, device=device),
'center': False,
'pad_mode': 'reflect',
'normalized': False,
'onesided': False,
},
# hamming_window, centered, not normalized, onesided, long hop_length
# window same size as n_fft
{
'size': [2, 500],
'n_fft': 256,
'hop_length': 254,
'win_length': 256,
'window': torch.hamming_window(256, dtype=dtype, device=device),
'center': True,
'pad_mode': 'constant',
'normalized': False,
'onesided': True,
},
]
for i, pattern in enumerate(patterns):
_test_istft_is_inverse_of_stft_with_padding(pattern)
@onlyNativeDeviceTypes
def test_istft_throws(self, device):
"""istft should throw exception for invalid parameters"""
stft = torch.zeros((3, 5, 2), device=device)
        # the window is size 1 but it hops 20, so there are gaps, which throws an error
self.assertRaises(
RuntimeError, torch.istft, stft, n_fft=4,
hop_length=20, win_length=1, window=torch.ones(1))
# A window of zeros does not meet NOLA
invalid_window = torch.zeros(4, device=device)
self.assertRaises(
RuntimeError, torch.istft, stft, n_fft=4, win_length=4, window=invalid_window)
# Input cannot be empty
self.assertRaises(RuntimeError, torch.istft, torch.zeros((3, 0, 2)), 2)
self.assertRaises(RuntimeError, torch.istft, torch.zeros((0, 3, 2)), 2)
@skipIfTorchDynamo("Failed running call_function")
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_of_sine(self, device, dtype):
complex_dtype = corresponding_complex_dtype(dtype)
def _test(amplitude, L, n):
# stft of amplitude*sin(2*pi/L*n*x) with the hop length and window size equaling L
x = torch.arange(2 * L + 1, device=device, dtype=dtype)
original = amplitude * torch.sin(2 * math.pi / L * x * n)
# stft = torch.stft(original, L, hop_length=L, win_length=L,
# window=torch.ones(L), center=False, normalized=False)
stft = torch.zeros((L // 2 + 1, 2), device=device, dtype=complex_dtype)
stft_largest_val = (amplitude * L) / 2.0
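            # The L-point DFT of A*sin(2*pi*n*k/L) is -j*A*L/2 at bin n and +j*A*L/2 at
            # bin L - n (zero elsewhere); with a rectangular length-L window and hop L,
            # every STFT frame equals that DFT, and only the bins that fall into the
            # onesided range [0, L//2] are stored below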
if n < stft.size(0):
stft[n].imag = torch.tensor(-stft_largest_val, dtype=dtype)
if 0 <= L - n < stft.size(0):
# symmetric about L // 2
stft[L - n].imag = torch.tensor(stft_largest_val, dtype=dtype)
inverse = torch.istft(
stft, L, hop_length=L, win_length=L,
window=torch.ones(L, device=device, dtype=dtype), center=False, normalized=False)
            # A larger tolerance is needed because the error scales with the amplitude
original = original[..., :inverse.size(-1)]
self.assertEqual(inverse, original, atol=1e-3, rtol=0)
_test(amplitude=123, L=5, n=1)
_test(amplitude=150, L=5, n=2)
_test(amplitude=111, L=5, n=3)
_test(amplitude=160, L=7, n=4)
_test(amplitude=145, L=8, n=5)
_test(amplitude=80, L=9, n=6)
_test(amplitude=99, L=10, n=7)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
@dtypes(torch.double)
def test_istft_linearity(self, device, dtype):
num_trials = 100
complex_dtype = corresponding_complex_dtype(dtype)
def _test(data_size, kwargs):
for i in range(num_trials):
tensor1 = torch.randn(data_size, device=device, dtype=complex_dtype)
tensor2 = torch.randn(data_size, device=device, dtype=complex_dtype)
a, b = torch.rand(2, dtype=dtype, device=device)
# Also compare method vs. functional call signature
istft1 = tensor1.istft(**kwargs)
istft2 = tensor2.istft(**kwargs)
istft = a * istft1 + b * istft2
estimate = torch.istft(a * tensor1 + b * tensor2, **kwargs)
self.assertEqual(istft, estimate, atol=1e-5, rtol=0)
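        # istft is a composition of linear operations (per-frame inverse FFT, windowing,
        # overlap-add and normalization), so a * istft(x1) + b * istft(x2) should match
        # istft(a * x1 + b * x2) up to rounding error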
patterns = [
# hann_window, centered, normalized, onesided
(
(2, 7, 7),
{
'n_fft': 12,
'window': torch.hann_window(12, device=device, dtype=dtype),
'center': True,
'normalized': True,
'onesided': True,
},
),
# hann_window, centered, not normalized, not onesided
(
(2, 12, 7),
{
'n_fft': 12,
'window': torch.hann_window(12, device=device, dtype=dtype),
'center': True,
'normalized': False,
'onesided': False,
},
),
# hamming_window, centered, normalized, not onesided
(
(2, 12, 7),
{
'n_fft': 12,
'window': torch.hamming_window(12, device=device, dtype=dtype),
'center': True,
'normalized': True,
'onesided': False,
},
),
# hamming_window, not centered, not normalized, onesided
(
(2, 7, 3),
{
'n_fft': 12,
'window': torch.hamming_window(12, device=device, dtype=dtype),
'center': False,
'normalized': False,
'onesided': True,
},
)
]
for data_size, kwargs in patterns:
_test(data_size, kwargs)
@onlyNativeDeviceTypes
@skipCPUIfNoFFT
def test_batch_istft(self, device):
original = torch.tensor([
[4., 4., 4., 4., 4.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]
], device=device, dtype=torch.complex64)
single = original.repeat(1, 1, 1)
multi = original.repeat(4, 1, 1)
i_original = torch.istft(original, n_fft=4, length=4)
i_single = torch.istft(single, n_fft=4, length=4)
i_multi = torch.istft(multi, n_fft=4, length=4)
self.assertEqual(i_original.repeat(1, 1), i_single, atol=1e-6, rtol=0, exact_dtype=True)
self.assertEqual(i_original.repeat(4, 1), i_multi, atol=1e-6, rtol=0, exact_dtype=True)
@onlyCUDA
@skipIf(not TEST_MKL, "Test requires MKL")
def test_stft_window_device(self, device):
# Test the (i)stft window must be on the same device as the input
x = torch.randn(1000, dtype=torch.complex64)
window = torch.randn(100, dtype=torch.complex64)
with self.assertRaisesRegex(RuntimeError, "stft input and window must be on the same device"):
torch.stft(x, n_fft=100, window=window.to(device))
with self.assertRaisesRegex(RuntimeError, "stft input and window must be on the same device"):
torch.stft(x.to(device), n_fft=100, window=window)
X = torch.stft(x, n_fft=100, window=window)
with self.assertRaisesRegex(RuntimeError, "istft input and window must be on the same device"):
torch.istft(X, n_fft=100, window=window.to(device))
with self.assertRaisesRegex(RuntimeError, "istft input and window must be on the same device"):
torch.istft(x.to(device), n_fft=100, window=window)
class FFTDocTestFinder:
    '''The default doctest finder rejects functions whose __module__ doesn't
    match torch.fft, assuming they are leaked imports.
    '''
def __init__(self) -> None:
self.parser = doctest.DocTestParser()
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
doctests = []
modname = name if name is not None else obj.__name__
globs = {} if globs is None else globs
for fname in obj.__all__:
func = getattr(obj, fname)
if inspect.isroutine(func):
qualname = modname + '.' + fname
docstring = inspect.getdoc(func)
if docstring is None:
continue
examples = self.parser.get_doctest(
docstring, globs=globs, name=fname, filename=None, lineno=None)
doctests.append(examples)
return doctests
class TestFFTDocExamples(TestCase):
pass
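# A minimal sketch (an assumption, not necessarily this file's actual wiring) of how
# FFTDocTestFinder could populate TestFFTDocExamples with one test per torch.fft doctest;
# the helper name `generate_doc_test` is illustrative only.
def generate_doc_test(doc_test):
    def test(self):
        runner = doctest.DocTestRunner(verbose=False)
        runner.run(doc_test)
        self.assertEqual(runner.failures, 0)
    return test
for doc_test in FFTDocTestFinder().find(torch.fft, globs=dict(torch=torch)):
    setattr(TestFFTDocExamples, 'test_' + doc_test.name, generate_doc_test(doc_test))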
|
import torch
import unittest
import math
from contextlib import contextmanager
from itertools import product
import itertools
import doctest
import inspect
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_NUMPY, TEST_LIBROSA, TEST_MKL, first_sample, TEST_WITH_ROCM,
make_tensor)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, onlyNativeDeviceTypes,
skipCPUIfNoFFT, deviceCountAtLeast, onlyCUDA, OpDTypes, skipIf, toleranceOverride, tol)
from torch.testing._internal.common_methods_invocations import (
spectral_funcs, SpectralFuncType)
from torch.testing._internal.common_cuda import SM53OrLater
from torch._prims_common import corresponding_complex_dtype
from setuptools import distutils
from typing import Optional, List
import numpy as np
import librosa
has_scipy_fft = False
import scipy.fft
LooseVersion = distutils.version.LooseVersion
REFERENCE_NORM_MODES = (
(None, "forward", "backward", "ortho")
if LooseVersion(np.__version__) >= '1.20.0' and (
not has_scipy_fft or LooseVersion(scipy.__version__) >= '1.6.0')
else (None, "ortho"))
|
import torch
import unittest
import math
from contextlib import contextmanager
from itertools import product
import itertools
import doctest
import inspect
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_NUMPY, TEST_LIBROSA, TEST_MKL, first_sample, TEST_WITH_ROCM,
make_tensor, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, onlyNativeDeviceTypes,
skipCPUIfNoFFT, deviceCountAtLeast, onlyCUDA, OpDTypes, skipIf, toleranceOverride, tol)
from torch.testing._internal.common_methods_invocations import (
spectral_funcs, SpectralFuncType)
from torch.testing._internal.common_cuda import SM53OrLater
from torch._prims_common import corresponding_complex_dtype
from typing import Optional, List
from packaging import version
import numpy as np
import librosa
has_scipy_fft = False
import scipy.fft
REFERENCE_NORM_MODES = (
(None, "forward", "backward", "ortho")
if version.parse(np.__version__) >= version.parse('1.20.0') and (
not has_scipy_fft or version.parse(scipy.__version__) >= version.parse('1.6.0'))
else (None, "ortho"))
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_csr.py
|
make_zero_batched
|
for index_dtype in [torch.int32, torch.int64]:
for (compressed_indices, plain_indices, values), kwargs in self.generate_simple_inputs(
layout, device=device, dtype=dtype, index_dtype=index_dtype, output_tensor=False):
size = kwargs['size']
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, values, size, layout)
|
def make_zero_batched(t):
return torch.empty(*((0,) + t.shape), dtype=t.dtype, device=t.device)
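# Note: torch.empty(*((0,) + t.shape)) prepends a size-0 batch dimension, i.e. it builds
# an empty batch of t-shaped components; the validation calls below should accept such inputs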
for index_dtype in [torch.int32, torch.int64]:
for (compressed_indices, plain_indices, values), kwargs in self.generate_simple_inputs(
layout, device=device, dtype=dtype, index_dtype=index_dtype, output_tensor=False):
size = kwargs['size']
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, values, size, layout)
# check empty batch
torch._validate_sparse_compressed_tensor_args(
*(make_zero_batched(t) for t in (compressed_indices, plain_indices, values)),
(0,) + size,
layout
)
compressed_indices = torch.tensor([0, 0], dtype=index_dtype)
plain_indices = torch.tensor([], dtype=index_dtype)
torch._validate_compressed_sparse_indices(layout in {torch.sparse_csr, torch.sparse_bsr},
compressed_indices, plain_indices, 1, 1, 0)
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_sparse_csr.py
|
values
|
def values(lst, device=device):
if layout in {torch.sparse_bsr, torch.sparse_bsc}:
lst = [[[item]] for item in lst]
return torch.tensor(lst, device=device)
tensor = partial(torch.tensor, device=device)
values = partial(values, device=device)
yield ('incontiguous compressed_indices',
tensor([0, -1, 2, -1, 4, -1])[::2],
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
'expected compressed_indices to be a contiguous tensor per batch')
yield ('incontiguous plain_indices',
tensor([0, 2, 4]),
tensor([0, -1, 1, -1, 0, -1, 2, -1])[::2],
values([1, 2, 3, 4]),
shape((2, 3)),
'expected plain_indices to be a contiguous tensor per batch')
yield ('0-D compressed_indices',
tensor(0),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
'compressed_indices must have dimensionality >= 1 but got 0')
yield ('compressed/plain_indices mismatch of dimensionalities',
tensor([[0, 2, 4]]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
'compressed_indices and plain_indices dimensionalities must be equal but got 2 and 1, respectively')
if layout in {torch.sparse_csr, torch.sparse_csc}:
yield ('indices and values mismatch of dimensionalities',
tensor([[0, 2, 4]]),
tensor([[0, 1, 0, 2]]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'values must have dimensionality > sum of batch and block dimensionalities \(=1 \+ 0\) but got 1')
else:
yield ('indices and values mismatch of dimensionalities',
tensor([[0, 2, 4]]),
tensor([[0, 1, 0, 2]]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'values must have dimensionality > sum of batch and block dimensionalities \(=1 \+ 2\) but got 3')
yield ('invalid size',
tensor([0, 2, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
(2,),
r'tensor dimensionality must be sum of batch, base, and dense dimensionalities \(=0 \+ 2 \+ 0\) but got 1')
yield ('invalid batchsize',
tensor([[0, 2, 4]]),
tensor([[0, 1, 0, 2]]),
values([[1, 2, 3, 4]]),
shape((2, 2, 3), 1),
r'all batch dimensions of compressed_indices \(=\[1\]\), plain_indices \(=\[1\]\), '
r'and values \(=\[1\]\) must be equal to tensor batch dimensions \(=\[2\]\)')
if layout is torch.sparse_bsr:
yield ('invalid blocksize',
tensor([0, 2, 4]),
tensor([0, 1, 0, 2]),
tensor([[[1, 11]], [[2, 22]], [[3, 33]], [[4, 33]]]),
shape((2, 3)),
r'tensor shape\[1\] \(=3\) must be divisible with blocksize\[1\] \(=2\) as defined by values shape')
if layout is torch.sparse_bsc:
yield ('invalid blocksize',
tensor([0, 2, 4]),
tensor([0, 1, 0, 2]),
tensor([[[1, 11]], [[2, 22]], [[3, 33]], [[4, 33]]]),
shape((3, 2)),
r'tensor shape\[1\] \(=3\) must be divisible with blocksize\[1\] \(=2\) as defined by values shape')
yield ('invalid compressed_indices shape',
tensor([0, 2, 3, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'compressed_indices.shape\[-1\] must be equal to the number of compressed_indices_names \+ 1 \(=3\), but got 4')
yield ('invalid compressed_indices shape',
tensor([0, 2, 4]),
tensor([0, 1, 0, 1, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'plain_indices.shape\[-1\] must be equal to nnz \(=4\) as defined by values.shape\[0\], but got 5')
yield ('compressed/plain_indices mismatch of dtype',
tensor([0, 2, 4], dtype=torch.int32),
tensor([0, 1, 0, 2], dtype=torch.int64),
values([1, 2, 3, 4]),
shape((2, 3)),
r'compressed_indices and plain_indices must have the same dtype, bot got Int and Long, respectively')
yield ('invalid compressed/plain_indices dtype',
tensor([0, 2, 4], dtype=torch.int16),
tensor([0, 1, 0, 2], dtype=torch.int16),
values([1, 2, 3, 4]),
shape((2, 3)),
r'compressed_indices and plain_indices dtype must be Int or Long, but got Short')
# CUDA kernel asserts are not recoverable, so we skip these for now
if torch.device(device).type == 'cpu':
yield ('invalid compressed_indices[0]',
tensor([1, 2, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`compressed_indices\[..., 0\] == 0` is not satisfied.')
yield ('invalid compressed_indices[-1]',
tensor([0, 2, 5]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`compressed_indices\[..., -1\] == nnz` is not satisfied.')
yield ('invalid compressed_indices.diff(dim=-1)',
tensor([0, 0, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'0 <= compressed_indices\[..., 1:\] - compressed_indices\[..., :\-1\] <= plain_dim` is not satisfied.')
yield ('invalid compressed_indices.diff(dim=-1)',
tensor([0, 5, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'0 <= compressed_indices\[..., 1:\] - compressed_indices\[..., :\-1\] <= plain_dim` is not satisfied.')
yield ('invalid min(plain_indices)',
tensor([0, 2, 4]),
tensor([0, -1, 0, 3]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`0 <= plain_indices < plain_dim` is not satisfied.')
yield ('invalid max(plain_indices)',
tensor([0, 2, 4]),
tensor([0, 1, 0, 3]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`0 <= plain_indices < plain_dim` is not satisfied.')
yield ('non-coalesced',
tensor([0, 2, 4]),
tensor([1, 0, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`plain_indices\[..., compressed_indices\[..., i - 1\]:compressed_indices\[..., i\]\] '
'for all i = 1, ..., compressed_dim '
'are sorted and distinct along the last dimension values` is not satisfied.')
if TEST_CUDA and torch.device(device).type == 'cpu':
yield ('indices and values mismatch of device',
torch.tensor([0, 2, 4]),
torch.tensor([0, 1, 0, 1]),
values([1, 2, 3, 4], device='cuda'),
shape((2, 3)),
r'device of compressed_indices \(=cpu\) must match device of values \(=cuda:0\)')
yield ('compressed_indices and values mismatch of device',
torch.tensor([0, 2, 4], device='cuda'),
torch.tensor([0, 1, 0, 1]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!')
yield ('compressed/plain_indices mismatch of device',
torch.tensor([0, 2, 4], device='cuda'),
torch.tensor([0, 1, 0, 1]),
values([1, 2, 3, 4], device='cuda'),
shape((2, 3)),
r'Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!')
if TEST_CUDA and torch.device(device).type == 'cuda' and torch.cuda.device_count() >= 2:
yield ('indices and values mismatch of device index',
torch.tensor([0, 2, 4], device='cuda:0'),
torch.tensor([0, 1, 0, 1], device='cuda:0'),
values([1, 2, 3, 4], device='cuda:1'),
shape((2, 3)),
r'device of compressed_indices \(=cuda:0\) must match device of values \(=cuda:1\)')
yield ('compressed_indices and values mismatch of device index',
torch.tensor([0, 2, 4], device='cuda:0'),
torch.tensor([0, 1, 0, 1], device='cuda:1'),
values([1, 2, 3, 4], device='cuda:0'),
shape((2, 3)),
r'Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cuda:1!')
|
def values(lst, device=device):
if layout in {torch.sparse_bsr, torch.sparse_bsc}:
lst = [[[item]] for item in lst]
return torch.tensor(lst, device=device)
tensor = partial(torch.tensor, device=device)
values = partial(values, device=device)
yield ('incontiguous compressed_indices',
tensor([0, -1, 2, -1, 4, -1])[::2],
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
'expected compressed_indices to be a contiguous tensor per batch')
yield ('incontiguous plain_indices',
tensor([0, 2, 4]),
tensor([0, -1, 1, -1, 0, -1, 2, -1])[::2],
values([1, 2, 3, 4]),
shape((2, 3)),
'expected plain_indices to be a contiguous tensor per batch')
yield ('0-D compressed_indices',
tensor(0),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
'compressed_indices must have dimensionality >= 1 but got 0')
yield ('compressed/plain_indices mismatch of dimensionalities',
tensor([[0, 2, 4]]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
'compressed_indices and plain_indices dimensionalities must be equal but got 2 and 1, respectively')
if layout in {torch.sparse_csr, torch.sparse_csc}:
yield ('indices and values mismatch of dimensionalities',
tensor([[0, 2, 4]]),
tensor([[0, 1, 0, 2]]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'values must have dimensionality > sum of batch and block dimensionalities \(=1 \+ 0\) but got 1')
else:
yield ('indices and values mismatch of dimensionalities',
tensor([[0, 2, 4]]),
tensor([[0, 1, 0, 2]]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'values must have dimensionality > sum of batch and block dimensionalities \(=1 \+ 2\) but got 3')
yield ('invalid size',
tensor([0, 2, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
(2,),
r'tensor dimensionality must be sum of batch, base, and dense dimensionalities \(=0 \+ 2 \+ 0\) but got 1')
yield ('invalid batchsize',
tensor([[0, 2, 4]]),
tensor([[0, 1, 0, 2]]),
values([[1, 2, 3, 4]]),
shape((2, 2, 3), 1),
r'all batch dimensions of compressed_indices \(=\[1\]\), plain_indices \(=\[1\]\), '
r'and values \(=\[1\]\) must be equal to tensor batch dimensions \(=\[2\]\)')
if layout is torch.sparse_bsr:
yield ('invalid blocksize',
tensor([0, 2, 4]),
tensor([0, 1, 0, 2]),
tensor([[[1, 11]], [[2, 22]], [[3, 33]], [[4, 33]]]),
shape((2, 3)),
r'tensor shape\[1\] \(=3\) must be divisible with blocksize\[1\] \(=2\) as defined by values shape')
if layout is torch.sparse_bsc:
yield ('invalid blocksize',
tensor([0, 2, 4]),
tensor([0, 1, 0, 2]),
tensor([[[1, 11]], [[2, 22]], [[3, 33]], [[4, 33]]]),
shape((3, 2)),
r'tensor shape\[1\] \(=3\) must be divisible with blocksize\[1\] \(=2\) as defined by values shape')
yield ('invalid compressed_indices shape',
tensor([0, 2, 3, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'compressed_indices.shape\[-1\] must be equal to the number of compressed_indices_names \+ 1 \(=3\), but got 4')
yield ('invalid compressed_indices shape',
tensor([0, 2, 4]),
tensor([0, 1, 0, 1, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'plain_indices.shape\[-1\] must be equal to nnz \(=4\) as defined by values.shape\[0\], but got 5')
yield ('compressed/plain_indices mismatch of dtype',
tensor([0, 2, 4], dtype=torch.int32),
tensor([0, 1, 0, 2], dtype=torch.int64),
values([1, 2, 3, 4]),
shape((2, 3)),
r'compressed_indices and plain_indices must have the same dtype, bot got Int and Long, respectively')
yield ('invalid compressed/plain_indices dtype',
tensor([0, 2, 4], dtype=torch.int16),
tensor([0, 1, 0, 2], dtype=torch.int16),
values([1, 2, 3, 4]),
shape((2, 3)),
r'compressed_indices and plain_indices dtype must be Int or Long, but got Short')
# CUDA kernel asserts are not recoverable, so we skip these for now
if torch.device(device).type == 'cpu':
yield ('invalid compressed_indices[0]',
tensor([1, 2, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`compressed_indices\[..., 0\] == 0` is not satisfied.')
yield ('invalid compressed_indices[0] when nnz == 0',
tensor([1, 0], dtype=torch.int64),
tensor([], dtype=torch.int64),
values([1])[:0],
shape((1, 1)),
r'`compressed_indices\[..., 0\] == 0` is not satisfied.')
yield ('invalid compressed_indices[-1]',
tensor([0, 2, 5]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`compressed_indices\[..., -1\] == nnz` is not satisfied.')
yield ('invalid compressed_indices[-1] when nnz == 0',
tensor([0, 1], dtype=torch.int64),
tensor([], dtype=torch.int64),
values([1])[:0],
shape((1, 1)),
r'`compressed_indices\[..., -1\] == nnz` is not satisfied.')
yield ('invalid compressed_indices.diff(dim=-1)',
tensor([0, 0, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'0 <= compressed_indices\[..., 1:\] - compressed_indices\[..., :\-1\] <= plain_dim` is not satisfied.')
yield ('invalid compressed_indices.diff(dim=-1)',
tensor([0, 5, 4]),
tensor([0, 1, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'0 <= compressed_indices\[..., 1:\] - compressed_indices\[..., :\-1\] <= plain_dim` is not satisfied.')
yield ('invalid min(plain_indices)',
tensor([0, 2, 4]),
tensor([0, -1, 0, 3]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`0 <= plain_indices < plain_dim` is not satisfied.')
yield ('invalid max(plain_indices)',
tensor([0, 2, 4]),
tensor([0, 1, 0, 3]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`0 <= plain_indices < plain_dim` is not satisfied.')
yield ('non-coalesced',
tensor([0, 2, 4]),
tensor([1, 0, 0, 2]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'`plain_indices\[..., compressed_indices\[..., i - 1\]:compressed_indices\[..., i\]\] '
'for all i = 1, ..., compressed_dim '
'are sorted and distinct along the last dimension values` is not satisfied.')
if TEST_CUDA and torch.device(device).type == 'cpu':
yield ('indices and values mismatch of device',
torch.tensor([0, 2, 4]),
torch.tensor([0, 1, 0, 1]),
values([1, 2, 3, 4], device='cuda'),
shape((2, 3)),
r'device of compressed_indices \(=cpu\) must match device of values \(=cuda:0\)')
yield ('compressed_indices and values mismatch of device',
torch.tensor([0, 2, 4], device='cuda'),
torch.tensor([0, 1, 0, 1]),
values([1, 2, 3, 4]),
shape((2, 3)),
r'Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!')
yield ('compressed/plain_indices mismatch of device',
torch.tensor([0, 2, 4], device='cuda'),
torch.tensor([0, 1, 0, 1]),
values([1, 2, 3, 4], device='cuda'),
shape((2, 3)),
r'Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!')
if TEST_CUDA and torch.device(device).type == 'cuda' and torch.cuda.device_count() >= 2:
yield ('indices and values mismatch of device index',
torch.tensor([0, 2, 4], device='cuda:0'),
torch.tensor([0, 1, 0, 1], device='cuda:0'),
values([1, 2, 3, 4], device='cuda:1'),
shape((2, 3)),
r'device of compressed_indices \(=cuda:0\) must match device of values \(=cuda:1\)')
yield ('compressed_indices and values mismatch of device index',
torch.tensor([0, 2, 4], device='cuda:0'),
torch.tensor([0, 1, 0, 1], device='cuda:1'),
values([1, 2, 3, 4], device='cuda:0'),
shape((2, 3)),
r'Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cuda:1!')
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_csr.py
|
test_select_copy
|
def test_select_copy(self, device, dtype, index_dtype, layout):
def is_view_of(base, other):
# a shameless copy of TestViewOps.is_view_of
if ((not other._is_view() or
other is base or
other._base is not base or
base.device != other.device)):
return False
if base.device.type == 'cpu' or base.device.type == 'cuda':
if base.untyped_storage().data_ptr() != other.untyped_storage().data_ptr():
return False
return True
kwargs = dict(device=device, dtype=dtype, index_dtype=index_dtype)
for sparse, dense in zip(self.generate_simple_inputs(layout, **kwargs),
self.generate_simple_inputs(torch.strided, **kwargs)):
if layout in {torch.sparse_csr, torch.sparse_bsr}:
n_batchdim = sparse.crow_indices().ndim - 1
elif layout in {torch.sparse_csc, torch.sparse_bsc}:
n_batchdim = sparse.ccol_indices().ndim - 1
else:
assert 0 # unreachable
self.assertEqual(sparse, dense)
for dim in range(sparse.ndim):
if sparse.shape[dim] == 0:
with self.assertRaisesRegex(IndexError, "index 0 out of range for tensor of size"):
torch.select_copy(sparse, dim, 0)
with self.assertRaisesRegex(IndexError, "index 0 out of range for tensor of size"):
torch.select_copy(dense, dim, 0)
elif n_batchdim and dim >= n_batchdim and dim < n_batchdim + 2:
with self.assertRaisesRegex(
RuntimeError,
"selecting sparse dimensions is not implemented for batched sparse compressed tensors"):
torch.select_copy(sparse, dim, 0)
else:
for index in {0, sparse.shape[dim] // 2, sparse.shape[dim] - 1}:
dense_select = torch.select_copy(dense, dim, index)
sparse_select = torch.select_copy(sparse, dim, index)
self.assertEqual(sparse_select, dense_select)
self.assertFalse(is_view_of(sparse_select.values(), sparse.values()))
|
def test_select_copy(self, device, dtype, index_dtype, layout):
def is_view_of(base, other):
# a shameless copy of TestViewOps.is_view_of
if (
not other._is_view() or
other is base or
other._base is not base or
base.device != other.device
):
return False
if base.device.type in ('cpu', 'cuda'):
if base.untyped_storage().data_ptr() != other.untyped_storage().data_ptr():
return False
return True
kwargs = dict(device=device, dtype=dtype, index_dtype=index_dtype)
for sparse, dense in zip(self.generate_simple_inputs(layout, **kwargs),
self.generate_simple_inputs(torch.strided, **kwargs)):
if layout in {torch.sparse_csr, torch.sparse_bsr}:
n_batchdim = sparse.crow_indices().ndim - 1
elif layout in {torch.sparse_csc, torch.sparse_bsc}:
n_batchdim = sparse.ccol_indices().ndim - 1
else:
assert 0 # unreachable
self.assertEqual(sparse, dense)
for dim in range(sparse.ndim):
if sparse.shape[dim] == 0:
with self.assertRaisesRegex(IndexError, "index 0 out of range for tensor of size"):
torch.select_copy(sparse, dim, 0)
with self.assertRaisesRegex(IndexError, "index 0 out of range for tensor of size"):
torch.select_copy(dense, dim, 0)
elif n_batchdim and dim >= n_batchdim and dim < n_batchdim + 2:
with self.assertRaisesRegex(
RuntimeError,
"selecting sparse dimensions is not supported for batched sparse compressed tensors"):
torch.select_copy(sparse, dim, 0)
else:
for index in {0, sparse.shape[dim] // 2, sparse.shape[dim] - 1}:
dense_select = torch.select_copy(dense, dim, index)
sparse_select = torch.select_copy(sparse, dim, index)
self.assertEqual(sparse_select, dense_select)
self.assertFalse(is_view_of(sparse_select.values(), sparse.values()))
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
class TestSparseCompressed(TestCase):
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
class TestSparseCompressed(TestCase):
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_csr.py
|
get_current_meta
|
def get_current_meta():
version = (0, dtype, sparsity)
meta_key = (M, K, N, *blocksize, False, True, True)
return get_meta(op, meta_key, version=version, exact=True)
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_csr.py
|
test_triton_bsr_dense_addmm_meta
|
def test_triton_bsr_dense_addmm_meta(self, device):
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
dtype = torch.float32
Ms = Ks = 16
beta = 0.0
alpha = 1.0
def get_meta(M, K, N, sparsity=None):
return bsr_dense_addmm_meta(M, K, N, Ms, Ks, beta, alpha, dtype=dtype, sparsity=sparsity,
_version="test_triton_bsr_dense_addmm_meta")
def update_meta(M, K, N, value, sparsity=0.5):
key = (M, K, N, Ms, Ks, beta == 0, beta == 1, alpha == 1)
update_bsr_dense_addmm_meta("bsr_dense_addmm", torch.cuda.get_device_name(),
("test_triton_bsr_dense_addmm_meta", dtype, sparsity),
key, value)
def get_meta_with_checks(M, K, N, warn_count=0, sparsity=None):
f = io.StringIO()
with redirect_stderr(f):
result = get_meta(M, K, N, sparsity=sparsity)
msg = f.getvalue()
FileCheck().check_count(
str=f"UserWarning: bsr_dense_addmm uses non-optimal triton kernel parameters for M={M} K={K} N={N}",
count=warn_count, exactly=True
).run(msg)
return result
# Test warn_once when requesting non-existing tuned parameters multiple times
f = io.StringIO()
with redirect_stderr(f):
for i in range(5):
get_meta(16, 16, 16)
for i in range(5):
get_meta(16, 16, 32)
msg = f.getvalue()
FileCheck().check_count(
str="UserWarning: bsr_dense_addmm uses non-optimal triton kernel parameters for M=16 K=16 N=16", count=1, exactly=True
).run(msg)
FileCheck().check_count(
str="UserWarning: bsr_dense_addmm uses non-optimal triton kernel parameters for M=16 K=16 N=32", count=1, exactly=True
).run(msg)
# Test warn_once when tuned parameters are missing
default_meta = dict(GROUP_SIZE_ROW=4, SPLIT_N=2, num_stages=1, num_warps=4)
self.assertEqual(get_meta_with_checks(32, 32, 32, warn_count=1), default_meta)
# Test (no)warn_once when tuned parameters are available
update_meta(32, 32, 48, (2, 8, 5, 6))
expected_meta = dict(GROUP_SIZE_ROW=2, SPLIT_N=8, num_stages=5, num_warps=6)
self.assertEqual(get_meta_with_checks(32, 32, 48, warn_count=0), expected_meta)
# Test non-existing tuned parameters with non-default sparsity
# while for default sparsity 0.5 the parameters are available
self.assertEqual(get_meta_with_checks(32, 32, 48, warn_count=0, sparsity=0.6), expected_meta)
# Test non-existing tuned parameters while there exists
# parameters with consistent N // SPLIT_N ratio:
self.assertEqual(get_meta_with_checks(32, 32, 72, warn_count=0),
dict(GROUP_SIZE_ROW=2, SPLIT_N=12, num_stages=5, num_warps=6))
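        # (the tuned N=48 entry above uses SPLIT_N=8, i.e. 48 // 8 = 6 columns per split;
        #  N=72 can reuse that per-split width with SPLIT_N = 72 // 6 = 12, hence no warning)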
# ... or not:
self.assertEqual(get_meta_with_checks(32, 32, 64, warn_count=1),
dict(GROUP_SIZE_ROW=4, SPLIT_N=4, num_stages=1, num_warps=4))
# e.g., TestSparseCSRCPU and TestSparseCSRCUDA
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
@skipIfNoTriton
class TestSparseCompressedTritonKernels(TestCase):
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
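The warn-once behaviour exercised by test_triton_bsr_dense_addmm_meta above relies on Python's default warning filter, which reports a given warning only once per call site, together with capturing stderr and counting the message. A minimal sketch of just that capture pattern, independent of the Triton kernels and using a plain string count instead of FileCheck:

import io
import warnings
from contextlib import redirect_stderr

def emit():
    # Assemble the text here so the echoed source line does not also contain it.
    params = "for M=16 K=16 N=16"
    warnings.warn("bsr_dense_addmm uses non-optimal triton kernel parameters " + params)

captured = io.StringIO()
with redirect_stderr(captured):
    for _ in range(5):
        emit()  # same warning from the same line, five times
# The default filter deduplicates per call site, so the message appears once.
assert captured.getvalue().count("non-optimal triton kernel parameters for M=16") == 1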
torch
|
test/test_sparse_csr.py
|
get_meta
|
def get_meta(M, K, N, sparsity=None):
return bsr_dense_addmm_meta(M, K, N, Ms, Ks, beta, alpha, dtype=dtype, sparsity=sparsity,
_version="test_triton_bsr_dense_addmm_meta")
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_csr.py
|
update_meta
|
def update_meta(M, K, N, value, sparsity=0.5):
key = (M, K, N, Ms, Ks, beta == 0, beta == 1, alpha == 1)
update_bsr_dense_addmm_meta("bsr_dense_addmm", torch.cuda.get_device_name(),
("test_triton_bsr_dense_addmm_meta", dtype, sparsity),
key, value)
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
sparse24_largest_mask_2d
|
def sparse24_largest_mask_2d(original):
sparse = SparseSemiStructuredTensorCUTLASS.prune_dense_static_sort(original)
return sparse.to_dense().bool()
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_semi_structured.py
|
sparsify24_dense
|
def sparsify24_dense(original):
return sparse24_largest_mask_2d(original) * original
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
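Taken together, the two helpers in the records above prune a dense matrix to the 2:4 pattern (keeping the largest-magnitude entries) and then materialize the pruned dense tensor as mask * original. A usage sketch under the assumption that a CUDA device with the CUTLASS backend is available:

import torch
from torch.sparse import SparseSemiStructuredTensorCUTLASS

def sparse24_largest_mask_2d(original):
    sparse = SparseSemiStructuredTensorCUTLASS.prune_dense_static_sort(original)
    return sparse.to_dense().bool()

def sparsify24_dense(original):
    return sparse24_largest_mask_2d(original) * original

x = torch.randn(128, 128, device="cuda", dtype=torch.float16)
pruned = sparsify24_dense(x)
# Every contiguous group of 4 values along a row keeps at most 2 nonzeros (2:4 sparsity).
assert (pruned.view(128, -1, 4).ne(0).sum(dim=-1) <= 2).all()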
torch
|
test/test_sparse_semi_structured.py
|
rand_sparse_semi_structured
|
def rand_sparse_semi_structured(r, c, dtype, device, choice=None):
pattern = '2by4' if dtype != torch.float32 else '1by2'
if pattern == '1by2':
ksparse = 2
choices = [
[0, 1],
[1, 0]
]
elif pattern == '2by4':
ksparse = 4
choices = [
[1, 1, 0, 0],
[1, 0, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 0],
[0, 1, 0, 1],
[0, 0, 1, 1]
]
mask_entries = [choice or random.choice(choices) for i in range(r * c // ksparse)]
mask = torch.tensor(mask_entries, dtype=torch.bool).view(r, c).to(device)
dense = make_tensor(r, c, dtype=dtype, device=device)
dense[dense == 0] = 1 # To prevent zeros except where mask applied.
dense = dense.masked_fill(~mask, 0)
return dense
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
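The mask construction inside rand_sparse_semi_structured above amounts to picking one ksparse-length pattern per group and reshaping to (r, c); for non-float32 dtypes the '2by4' pattern keeps exactly 2 of every 4 elements. A minimal standalone sketch of just that mask assembly (value filling and device handling from the helper are omitted):

import random
import torch

r, c, ksparse = 8, 8, 4  # '2by4' pattern: 2 nonzeros per group of 4
choices = [[1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1],
           [0, 1, 1, 0], [0, 1, 0, 1], [0, 0, 1, 1]]
mask_entries = [random.choice(choices) for _ in range(r * c // ksparse)]
mask = torch.tensor(mask_entries, dtype=torch.bool).view(r, c)
assert mask.view(-1, ksparse).sum(dim=-1).eq(2).all()  # exactly 2 of every 4 kept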
torch
|
test/test_sparse_semi_structured.py
|
rand_sparse_semi_structured_all_patterns
|
def rand_sparse_semi_structured_all_patterns(r, c, dtype, device):
pattern = '2by4' if dtype != torch.float32 else '1by2'
if pattern == '1by2':
ksparse = 2
choices = [
[[0, 0], [0, 1]],
[[0, 1], [0, 1]],
[[1, 0], [1, 0]],
[[1, 1], [1, 0]]
]
elif pattern == '2by4':
ksparse = 4
choices = [
[[0, 0, 0, 0], [0, 0, 1, 1]],
[[0, 0, 0, 1], [0, 0, 1, 1]],
[[0, 0, 1, 0], [0, 0, 1, 1]],
[[0, 0, 1, 1], [0, 0, 1, 1]],
[[0, 1, 0, 0], [0, 1, 1, 0]],
[[0, 1, 0, 1], [0, 1, 0, 1]],
[[0, 1, 1, 0], [0, 1, 1, 0]],
[[0, 1, 1, 1], [0, 1, 0, 1]],
[[1, 0, 0, 0], [1, 0, 1, 0]],
[[1, 0, 0, 1], [1, 0, 0, 1]],
[[1, 0, 1, 0], [1, 0, 1, 0]],
[[1, 0, 1, 1], [1, 0, 0, 1]],
[[1, 1, 0, 0], [1, 1, 0, 0]],
[[1, 1, 0, 1], [1, 1, 0, 0]],
[[1, 1, 1, 0], [1, 1, 0, 0]],
[[1, 1, 1, 1], [1, 1, 0, 0]],
]
mask_rows = [random.randint(0, len(choices) - 1) for i in range(r * c // ksparse)]
COL_INV, COL_VAL = 0, 1
mask_entries_inv = [choices[i][COL_INV] for i in mask_rows]
mask_entries_val = [choices[i][COL_VAL] for i in mask_rows]
mask_inv = torch.tensor(mask_entries_inv, dtype=torch.bool).view(r, c).to(device)
mask_val = torch.tensor(mask_entries_val, dtype=torch.bool).view(r, c).to(device)
dense = make_tensor(r, c, dtype=dtype, device=device)
dense[dense == 0] = 1 # To prevent zeros except where mask below applied.
dense_inv = dense.masked_fill(~mask_inv, 0)
dense_val = dense_inv.masked_fill(~mask_val, 0)
return dense_inv, dense_val
class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
def setUp(self):
if len(SEMI_STRUCTURED_SUPPORTED_BACKENDS) == 0:
self.skipTest('semi-structured sparsity has no available backend!')
super().setUp()
def tearDown(self):
super().tearDown()
@staticmethod
def _test_mlp_contiguous_relu_compile(backend, dense_input_shape):
"""
Test nn.Linear + .contiguous() + nn.ReLU with SparseSemiStructuredTensor + torch.compile
We expect:
(1) The sparse tensor subclass should turn nn.Linear into `aten._structured_sparse_addmm` + `aten.contiguous()`
(2) Inductor should fuse the .contiguous() call into the relu
"""
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(128, 128)
def forward(self, x):
x = self.linear(x)
x = x.contiguous()
return torch.nn.functional.relu(x)
input = torch.rand(dense_input_shape, device="cuda").half()
model = Model().eval().cuda().half()
mod_linear = model.linear
m, n = mod_linear.weight.shape
mask = torch.Tensor([1, 0, 0, 1]).tile((m, n // 4)).bool().cuda()
# set masked weight
mod_linear.weight = nn.Parameter(mod_linear.weight * mask)
dense_result = model(input)
mod_linear.weight = nn.Parameter(SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend].from_dense(mod_linear.weight))
sparse_result = model(input)
model = torch.compile(model, backend="inductor", fullgraph=True)
sparse_compile_result = model(input)
# test that sparse_compile_result and dense_result are numerically close
torch.testing.assert_close(dense_result, sparse_compile_result, rtol=1e-3, atol=1e-3)
# assert sparse and sparse_compile have the same strides,
# as meta registrations may return contiguous tensors when the output is transposed
# https://github.com/pytorch/pytorch/pull/114477
assert sparse_result.stride() == sparse_compile_result.stride()
@unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows")
@unittest.skipIf("cusparselt" not in SEMI_STRUCTURED_SUPPORTED_BACKENDS, "cusparselt not supported on this machine")
def test_mlp_contiguous_relu_compile_cusparselt(self):
"""
        test for cuSPARSELt meta registrations (_cslt_sparse_mm) + torch.compile
"""
for dense_input_shape in [(1, 128), (64, 128), (128, 128), (64, 128, 128)]:
SparseSemiStructuredTensorCompileTest._test_mlp_contiguous_relu_compile("cusparselt", dense_input_shape)
@unittest.skipIf("cutlass" not in SEMI_STRUCTURED_SUPPORTED_BACKENDS, "cutlass not supported on this machine")
@unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows")
def test_mlp_contiguous_relu_compile_cutlass(self):
"""
test for CUTLASS meta registrations (_sparse_semi_structured_addmm) + torch.compile
"""
for dense_input_shape in [(1, 128), (64, 128), (128, 128), (64, 128, 128)]:
SparseSemiStructuredTensorCompileTest._test_mlp_contiguous_relu_compile("cutlass", dense_input_shape)
@unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows")
@unittest.skipIf("cusparselt" not in SEMI_STRUCTURED_SUPPORTED_BACKENDS, "cusparselt not supported on this machine")
def test_sp24_compile(self) -> None:
x = torch.randn([1024, 512], device="cuda", dtype=torch.float16, requires_grad=True)
e = torch.eye(x.shape[0], x.shape[0], device="cuda", dtype=torch.float16)
def fn(x, e):
y = SparseSemiStructuredTensorCUSPARSELT.prune_dense_static_sort(x)
y = y.t()
return x @ y
# Eager
output = fn(x, e)
output.backward(output)
# Torch compile
output = torch.compile(fn)(x, e)
output.backward(output)
class TestSparseSemiStructured(TestCase):
def setUp(self):
if len(SEMI_STRUCTURED_SUPPORTED_BACKENDS) == 0:
self.skipTest('semi-structured sparsity has no available backend!')
if IS_WINDOWS:
self.skipTest("torch.compile not supported on windows")
@inference_dtypes
@parametrize_backends
def test_to_sparse_semi_structured(self, dtype, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
A = rand_sparse_semi_structured_mask(128, 256, dtype=dtype)
A_sparse = to_sparse_semi_structured(A)
assert A.shape == A_sparse.shape
assert A.device == A_sparse.device
assert A.dtype == A_sparse.dtype
assert isinstance(A, torch.Tensor)
assert isinstance(A_sparse, SparseSemiStructuredTensor)
@inference_dtypes
@parametrize_backends
@parametrize("dense_input_shape", [(128, 1), (128, 64), (128, 128)])
def test_mm_sparse_first_NN(self, dense_input_shape, dtype, device, backend):
"""
Ensure torch.mm(A_sparse, B) is correct for float16 and will throw error for int8
"""
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
A = rand_sparse_semi_structured_mask(256, 128, dtype=dtype)
A_sparse = to_sparse_semi_structured(A)
B = torch.rand(dense_input_shape, device=A_sparse.device).to(dtype)
# Currently we don't support int matmul on GPU, so evaluate on CPU and copy over
if dtype is torch.int8:
if backend == "cutlass":
with self.assertRaisesRegex(RuntimeError, "spgemm_cutlass_dispatch_layouts"):
sparse_result = torch.mm(A_sparse, B)
else:
with self.assertRaisesRegex(RuntimeError,
"CUDA error: operation not supported when calling `cusparseLtMatmulDescriptorInit"):
sparse_result = torch.mm(A_sparse, B)
else:
dense_result = torch.mm(A, B)
sparse_result = torch.mm(A_sparse, B)
torch.testing.assert_close(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
@inference_dtypes
@parametrize_backends
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)])
def test_mm_sparse_first_NT(self, dense_input_shape, dtype, device, backend):
"""
Ensure torch.mm(A_sparse, B.t()) is correct for float16/bfloat16
and will throw an error for int8 + padding
"""
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
A = rand_sparse_semi_structured_mask(256, 128, dtype=dtype)
A_sparse = to_sparse_semi_structured(A)
B = torch.rand(dense_input_shape, device=A_sparse.device).to(dtype)
# Currently we don't support int matmul on GPU, so evaluate on CPU and copy over
if dtype is torch.int8 and dense_input_shape in {(1, 128)}:
# padding with int8 throws an error because transposing B yields a contiguous output
# and row-row 2:4 sparse @ dense with NN is not supported by cuSPARSELt or CUTLASS.
if backend == "cutlass":
with self.assertRaisesRegex(RuntimeError, "spgemm_cutlass_dispatch_layouts"):
sparse_result = torch.mm(A_sparse, B.t())
else:
with self.assertRaisesRegex(RuntimeError,
"CUDA error: operation not supported when calling `cusparseLtMatmulDescriptorInit"):
sparse_result = torch.mm(A_sparse, B.t())
elif dtype is torch.int8:
# test transpose
dense_result = torch.mm(A.cpu(), B.t().cpu()).to(device, dtype=torch.int8)
sparse_result = torch.mm(A_sparse, B.t())
torch.testing.assert_close(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
else:
# test transpose
dense_result = torch.mm(A, B.t())
sparse_result = torch.mm(A_sparse, B.t())
torch.testing.assert_close(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
@inference_dtypes
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)])
@parametrize_backends
def test_mm_sparse_first_TN(self, dtype, dense_input_shape, device, backend):
"""
Ensure torch.mm(A_sparse.t(), B) throws error
"""
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
A = rand_sparse_semi_structured_mask(128, 256, dtype=dtype)
A_sparse = to_sparse_semi_structured(A)
B = torch.rand(dense_input_shape, device=A_sparse.device).to(dtype)
with self.assertRaisesRegex(
NotImplementedError,
r"`SparseSemiStructuredTensor.*` matmul: operation is not supported",
):
torch.mm(A_sparse.t(), B)
@inference_dtypes
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)])
@parametrize_backends
def test_mm_sparse_second_NT(self, dense_input_shape, dtype, device, backend):
"""
Ensure torch.mm(A, B_sparse.t()) is correct
"""
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
B = rand_sparse_semi_structured_mask(256, 128, dtype=dtype)
B_sparse = to_sparse_semi_structured(B)
A = torch.rand(dense_input_shape, device=B_sparse.device).to(dtype)
# Currently we don't support int matmul on GPU, so evaluate on CPU and copy over
if dtype is torch.int8:
dense_result = torch.mm(A.cpu(), B.t().cpu()).to(device, dtype=torch.int8)
sparse_result = torch.mm(A, B_sparse.t())
else:
dense_result = torch.mm(A, B.t())
sparse_result = torch.mm(A, B_sparse.t())
torch.testing.assert_close(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
@inference_dtypes
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)])
@parametrize_backends
def test_mm_sparse_second_NN(self, dense_input_shape, dtype, device, backend):
"""
Ensure torch.mm(A, B_sparse) throws error
"""
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
B = rand_sparse_semi_structured_mask(256, 128, dtype=dtype)
B_sparse = to_sparse_semi_structured(B)
A = torch.rand(dense_input_shape, device=B_sparse.device).to(dtype)
with self.assertRaisesRegex(
NotImplementedError,
r"`SparseSemiStructuredTensor.*` matmul: operation is not supported",
):
sparse_result = torch.mm(A, B_sparse)
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128), (64, 128, 128)])
@parametrize("inference_mode", [subtest(True), subtest(False)])
@parametrize_backends
def test_linear(self, dense_input_shape, inference_mode, device, backend):
"""
Test nn.Linear has the same numerics
"""
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
input = torch.rand((dense_input_shape), device=device).half()
model = nn.Linear(128, 256).to(device).half()
m, n = model.weight.shape
mask = rand_sparse_semi_structured_mask(m, n, device=device, dtype=torch.bool)
# set masked weight
model.weight = nn.Parameter(model.weight * mask)
dense_result = model(input)
model.weight = nn.Parameter(to_sparse_semi_structured(model.weight))
if inference_mode:
with torch.inference_mode():
sparse_result = model(input)
else:
sparse_result = model(input)
torch.testing.assert_close(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128), (64, 128, 128)])
@parametrize_backends
def test_mlp(self, device, dense_input_shape, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
input = torch.rand(dense_input_shape, device=device).half()
model = (
nn.Sequential(
nn.Linear(128, 256),
nn.Linear(256, 128),
)
.half()
.to(device)
)
for i in range(2):
m, n = model[i].weight.shape
mask = rand_sparse_semi_structured_mask(
m, n, device=device, dtype=torch.bool
)
# set masked weight
model[i].weight = nn.Parameter(model[i].weight * mask)
dense_result = model(input)
for i in range(2):
model[i].weight = nn.Parameter(to_sparse_semi_structured(model[i].weight))
sparse_result = model(input)
torch.testing.assert_close(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
@parametrize_backends
def test_values(self, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
A = rand_sparse_semi_structured_mask(128, 128)
A_sparse = to_sparse_semi_structured(A)
assert A_sparse.values().shape == (128, 64)
assert (A_sparse.values() == 1).all()
@parametrize_backends
def test_indices(self, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
A = rand_sparse_semi_structured_mask(128, 128)
A_sparse = to_sparse_semi_structured(A)
assert A_sparse.indices().shape == (128, 8)
@inference_dtypes
@parametrize_backends
def test_min_sparse_shape(self, dtype, device, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
config = SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend]._DTYPE_SHAPE_CONSTRAINTS[dtype]
A = rand_sparse_semi_structured_mask(config.sparse_min_rows, config.sparse_min_cols, dtype=dtype, device=device)
A_sparse = to_sparse_semi_structured(A)
B = torch.rand((config.sparse_min_cols, config.dense_min_cols), device=device).to(dtype)
if dtype == torch.int8:
dense_res = torch.mm(A.cpu(), B.cpu()).to(device, dtype=torch.int8)
# int8 sparse matmul not supported for R/R -> R layout, so we transpose one of the arguments to get R/C -> R
B_t = B.t().contiguous()
sparse_res = torch.mm(A_sparse, B_t.t())
else:
dense_res = torch.mm(A, B)
sparse_res = torch.mm(A_sparse, B)
torch.testing.assert_close(sparse_res, dense_res, rtol=1e-3, atol=1e-3)
@inference_dtypes
@parametrize_backends
def test_unsupported_shape(self, dtype, device, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
A = rand_sparse_semi_structured_mask(2, 2, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "Error original_tensor.shape"):
A_sparse = to_sparse_semi_structured(A)
@dtypes(*all_types_and_complex())
@parametrize_backends
def test_unsupported_dtype(self, dtype, device, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
A = rand_sparse_semi_structured_mask(128, 128, dtype=dtype, device=device)
if dtype not in SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend]._DTYPE_SHAPE_CONSTRAINTS:
with self.assertRaisesRegex(RuntimeError, "Error original_tensor.dtype"):
A_sparse = to_sparse_semi_structured(A)
else:
A_sparse = to_sparse_semi_structured(A)
@parametrize_backends
def test_unsupported_dim(self, device, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
self.skipTest("CUTLASS not supported on Windows")
A = torch.rand(128, 128, 128, device=device, dtype=torch.float16)
with self.assertRaisesRegex(RuntimeError, "Error original_tensor.dim"):
A_sparse = to_sparse_semi_structured(A)
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
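Condensed from test_linear in the record above, a hedged end-to-end sketch of swapping a 2:4-masked nn.Linear weight for its semi-structured form (assumes a CUDA device with a supported backend; the [1, 0, 0, 1] tile mirrors the mask used elsewhere in these tests):

import torch
from torch import nn
from torch.sparse import to_sparse_semi_structured

model = nn.Linear(128, 256).cuda().half()
m, n = model.weight.shape
mask = torch.Tensor([1, 0, 0, 1]).tile((m, n // 4)).bool().cuda()
model.weight = nn.Parameter(model.weight * mask)  # enforce the 2:4 pattern
x = torch.rand(64, 128, device="cuda").half()
dense_out = model(x)
model.weight = nn.Parameter(to_sparse_semi_structured(model.weight))
sparse_out = model(x)
torch.testing.assert_close(dense_out, sparse_out, rtol=1e-3, atol=1e-3)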
torch
|
test/test_sparse_semi_structured.py
|
setUp
|
def setUp(self):
if len(SEMI_STRUCTURED_SUPPORTED_BACKENDS) == 0:
self.skipTest('semi-structured sparsity has no available backend!')
super().setUp()
|
import itertools
import random
import unittest
import torch
from torch import nn
import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
SparseSemiStructuredTensorCUSPARSELT,
SparseSemiStructuredTensorCUTLASS,
to_sparse_semi_structured,
)
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex
import torch._dynamo.test_case
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
subtest,
TestCase,
TEST_WITH_ROCM,
IS_WINDOWS,
)
import pytest
from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
_IS_SM8X = False
_IS_SM9X = False
inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.int8)
training_dtypes = dtypes(torch.float16, torch.bfloat16)
parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
atol_rtol_kw = {
torch.float16: {
"rtol": 1e-3,
"atol": 1e-3,
},
torch.bfloat16: {
"rtol": 1e-1,
"atol": 1e-1,
},
}
class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_csr.py
|
ref_sp_numpy
|
def ref_sp_numpy(c, a, b, alpha=None, beta=None, out=None):
def prep_input(t):
def to_sp_block_compressed(t):
if t.layout is torch.sparse_bsc:
tt = t.transpose(-1, -2)
else:
tt = t
t_sp_bsr = sp.bsr_matrix(
(
tt.values().cpu().numpy(),
tt.col_indices().cpu().numpy(),
tt.crow_indices().cpu().numpy(),
),
shape=tt.shape,
)
if t.layout is torch.sparse_bsc:
return t_sp_bsr.transpose()
else:
return t_sp_bsr
if t.layout is not torch.strided:
return to_sp_block_compressed(t)
else:
return t.cpu().resolve_conj().numpy()
res = _npref_block_addmm_addmv(
*map(lambda t: prep_input(t), (c, a, b)),
alpha,
beta
)
if out is not None:
out.copy_(res)
return out
else:
return res
|
def ref_sp_numpy(c, a, b, alpha=None, beta=None, out=None):
def prep_input(t):
def to_sp_block_compressed(t):
if t.layout is torch.sparse_bsc:
tt = t.transpose(-1, -2)
else:
tt = t
t_sp_bsr = sp.bsr_matrix(
(
tt.values().cpu().numpy(),
tt.col_indices().cpu().numpy(),
tt.crow_indices().cpu().numpy(),
),
shape=tt.shape,
)
if t.layout is torch.sparse_bsc:
return t_sp_bsr.transpose()
else:
return t_sp_bsr
if t.layout is not torch.strided:
return to_sp_block_compressed(t)
else:
return t.cpu().resolve_conj().numpy()
res = _npref_block_addmm_addmv(
*(prep_input(t) for t in (c, a, b)),
alpha,
beta
)
if out is not None:
out.copy_(res)
return out
else:
return res
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_csr.py
|
prep_input
|
def prep_input(t):
def to_sp_block_compressed(t):
if t.layout is torch.sparse_bsc:
tt = t.transpose(-1, -2)
else:
tt = t
t_sp_bsr = sp.bsr_matrix(
(
tt.values().cpu().numpy(),
tt.col_indices().cpu().numpy(),
tt.crow_indices().cpu().numpy(),
),
shape=tt.shape,
)
if t.layout is torch.sparse_bsc:
return t_sp_bsr.transpose()
else:
return t_sp_bsr
if t.layout is not torch.strided:
return to_sp_block_compressed(t)
else:
return t.cpu().resolve_conj().numpy()
res = _npref_block_addmm_addmv(
*map(lambda t: prep_input(t), (c, a, b)),
alpha,
beta
)
if out is not None:
out.copy_(res)
return out
else:
return res
|
def prep_input(t):
def to_sp_block_compressed(t):
if t.layout is torch.sparse_bsc:
tt = t.transpose(-1, -2)
else:
tt = t
t_sp_bsr = sp.bsr_matrix(
(
tt.values().cpu().numpy(),
tt.col_indices().cpu().numpy(),
tt.crow_indices().cpu().numpy(),
),
shape=tt.shape,
)
if t.layout is torch.sparse_bsc:
return t_sp_bsr.transpose()
else:
return t_sp_bsr
if t.layout is not torch.strided:
return to_sp_block_compressed(t)
else:
return t.cpu().resolve_conj().numpy()
res = _npref_block_addmm_addmv(
*(prep_input(t) for t in (c, a, b)),
alpha,
beta
)
if out is not None:
out.copy_(res)
return out
else:
return res
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
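The before/after pair in the two records above replaces *map(lambda t: prep_input(t), (c, a, b)) with the generator expression *(prep_input(t) for t in (c, a, b)); both unpack the three prepared operands into positional arguments, and the rewrite simply drops the redundant lambda. A tiny sketch of the equivalence (f and the scaling are placeholders, not the real reference helper):

def f(x, y, z, scale):
    return (x + y + z) * scale

args = (1, 2, 3)
# Both spellings pass the transformed elements positionally, followed by `scale`.
assert f(*map(lambda t: t * 10, args), 2) == f(*(t * 10 for t in args), 2) == 120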
torch
|
test/test_sparse_csr.py
|
ref_block_addmv
|
for (m, k) in itertools.product([2, 5], repeat=2):
nnz = random.randint(0, m * k)
if not noncontiguous:
a = self.genSparseCSRTensor((m * block_size, k * block_size), nnz,
dtype=dtype, device=device, index_dtype=index_dtype)
a = a.to_sparse_bsr((block_size, block_size))
else:
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data # Test column-major blocks
a = torch.sparse_bsr_tensor(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, k * block_size), check_invariants=False)
b = make_tensor((k * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
c = make_tensor((m * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
self.run_test_block_addmm_addmv(torch.addmv, c, a, b, dtype=dtype, device=device)
|
def ref_block_addmv(c, a, b, alpha, beta):
return _npref_block_addmm_addmv(c, a.to_dense(), b, alpha, beta)
for (m, k) in itertools.product([2, 5], repeat=2):
nnz = random.randint(0, m * k)
if not noncontiguous:
a = self.genSparseCSRTensor((m * block_size, k * block_size), nnz,
dtype=dtype, device=device, index_dtype=index_dtype)
a = a.to_sparse_bsr((block_size, block_size))
else:
a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data # Test column-major blocks
a = torch.sparse_bsr_tensor(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, k * block_size), check_invariants=False)
b = make_tensor((k * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
c = make_tensor((m * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
self.run_test_block_addmm_addmv(torch.addmv, c, a, b, dtype=dtype, device=device, ref=ref_block_addmv)
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_sparse_csr.py
|
test_empty_inputs
|
def test_empty_inputs(lhs_layout, rhs_layout):
xd = torch.rand(10, 0, device=device, dtype=dtype)
yd = xd.transpose(-2, -1)
zd = torch.rand(0, 0, device=device, dtype=dtype)
xls, yls, zls = [t.to_sparse(layout=lhs_layout) for t in (xd, yd, zd)]
xrs, yrs, zrs = [t.to_sparse(layout=rhs_layout) for t in (xd, yd, zd)]
for ls, rs, ld, rd in [(xls, yrs, xd, yd), (xls, zrs, xd, zd), (zls, yrs, zd, yd), (zls, zrs, zd, zd)]:
res_sparse = ls @ rs
res_dense = ld @ rd
self.assertEqual(res_sparse.to_dense(), res_dense)
|
def test_empty_inputs(lhs_layout, rhs_layout):
xd = torch.rand(10, 0, device=device, dtype=dtype)
yd = xd.transpose(-2, -1)
zd = torch.rand(0, 0, device=device, dtype=dtype)
xls, yls, zls = (t.to_sparse(layout=lhs_layout) for t in (xd, yd, zd))
xrs, yrs, zrs = (t.to_sparse(layout=rhs_layout) for t in (xd, yd, zd))
for ls, rs, ld, rd in [(xls, yrs, xd, yd), (xls, zrs, xd, zd), (zls, yrs, zd, yd), (zls, zrs, zd, zd)]:
res_sparse = ls @ rs
res_dense = ld @ rd
self.assertEqual(res_sparse.to_dense(), res_dense)
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_csr.py
|
_test_spadd_shape
|
def _test_spadd_shape(nnz, shape):
# sparse.to_dense() uses torch.add internally so if torch.add is wrong,
# the dense tensor will be wrong but this test would still pass
# there's a separate test that checks for the correctness of the .to_dense() call
x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
y = torch.randn(*shape, dtype=dtype, device=device)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = torch.randn(*s, dtype=torch.double, device=device)
y.transpose_(0, len(s) - 1)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
ns = [2, 5]
batch_shapes = [(), (2,), (2, 3)]
for b, m, n in itertools.product(batch_shapes, ns, ns):
_test_spadd_shape(0, (*b, m, n))
_test_spadd_shape(m * n // 2, (*b, m, n))
_test_spadd_shape(m * n, (*b, m, n))
|
def _test_spadd_shape(nnz, shape):
# sparse.to_dense() uses torch.add internally so if torch.add is wrong,
# the dense tensor will be wrong but this test would still pass
# there's a separate test that checks for the correctness of the .to_dense() call
x = self.genSparseCompressedTensor(shape, nnz,
dtype=dtype,
device=device,
index_dtype=torch.int32,
layout=layout,
blocksize=())
y = torch.randn(*shape, dtype=dtype, device=device)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
res_perm = torch.add(x, y, alpha=r)
self.assertEqual(res_perm, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = torch.randn(*s, dtype=torch.double, device=device)
y.transpose_(0, len(s) - 1)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
res_perm = torch.add(x, y, alpha=r)
self.assertEqual(res, expected)
self.assertEqual(res_perm, expected)
ns = [2, 5]
batch_shapes = [(), (2,), (2, 3)]
for b, m, n in itertools.product(batch_shapes, ns, ns):
_test_spadd_shape(0, (*b, m, n))
_test_spadd_shape(m * n // 2, (*b, m, n))
_test_spadd_shape(m * n, (*b, m, n))
|
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse_csr.py
|
broadcast_input
|
def broadcast_input(*ts):
batch_dims = torch.broadcast_shapes(*(t.shape[:-2] for t in ts))
yield from (torch.broadcast_to(t, batch_dims + t.shape[-2:]) for t in ts)
# NOTE: batch dims with zero sizes are not supported in `to_sparse_bsr`.
batches = [(), (2,), (2, 2)]
size = [128, 256, 0]
for bam, bq, bk, bv, m, n, k in itertools.product(batches, batches, batches, batches, size, size, size):
query = tensor(bq + (m, k))
key = tensor(bk + (n, k))
value = tensor(bv + (n, k))
# We make attn_mask block lower/upper triangular so that BSR and Strided
# function variants are directly comparable.
attn_mask = torch.ones(bam + (m, n), device=device, dtype=torch.bool)
attn_mask = self._to_block_triangular_inplace(attn_mask, block_size, block_size)
attn_mask_bsr = attn_mask.to_sparse_bsr(block_size)
# NOTE: only boolean mask is directly compatible with the Strided version
# without any pre-/post-processing. Hence we test against a boolean mask.
for scale in (None, 1. / 16):
if scale is None and query.size(-1) == 0:
scale = 1
expected = torch.nn.functional.scaled_dot_product_attention(
*broadcast_input(query, key, value, attn_mask), scale=scale
)
for mask_dtype in (torch.bool, dtype):
res = _scaled_dot_product_attention(query, key, value, attn_mask_bsr.to(mask_dtype), scale=scale)
self.assertEqual(res, expected)
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_csr.py
|
test_triton_bsr_scatter_mm
|
def test_triton_bsr_scatter_mm(self, device, dtype, blocksize):
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
if isinstance(blocksize, str):
blocksize = tuple(map(int, blocksize.split('x')))
else:
blocksize = (blocksize,) * 2
# Note that each value in a non-zero block is in range blocksize * [low^2, high^2).
tensor = partial(make_tensor, device=device, dtype=dtype, low=0.5, high=1.5)
# NOTE: batch dims with zero sizes are not supported in `to_sparse_bsr`.
batches = [(), (2,), (2, 2)]
sizes = [blocksize[0], 2 * blocksize[0], 4 * blocksize[0]]
sizes_K = [blocksize[1], 2 * blocksize[1]]
for bd, bs, M, K, N, has_zero_row_block in itertools.product(batches, batches[:1], sizes, sizes_K, sizes, (False, True)):
bsr_dense = tensor(bs + (M, K))
if has_zero_row_block:
if M > blocksize[0]:
bsr_dense[:blocksize[0]].zero_()
else:
continue
bsr = bsr_dense.to_sparse_bsr(blocksize)
dense = tensor(bd + (K, N))
expected = bsr.to_dense() @ dense
for indices_format in ('bsr_strided_mm', 'bsr_strided_mm_compressed', 'scatter_mm'):
if indices_format in {'bsr_strided_mm', 'bsr_strided_mm_compressed'}:
SPLIT_N_list = [N]
while SPLIT_N_list[-1] > 1:
SPLIT_N_list.append(max(1, SPLIT_N_list[-1] // 2))
else:
SPLIT_N_list = [1]
for SPLIT_N in SPLIT_N_list:
indices_data = bsr_scatter_mm_indices_data(
bsr, dense, indices_format=indices_format, SPLIT_N=SPLIT_N)
try:
result = bsr_scatter_mm(bsr, dense, indices_data=indices_data)
except triton.compiler.OutOfResources:
                        # ensure that there was at least one successful test:
assert SPLIT_N < SPLIT_N_list[0]
break
self.assertEqual(result, expected)
torch.sparse._triton_ops._bsr_scatter_mm_indices_data.cache_clear()
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
@skipIfNoTriton
class TestSparseCompressedTritonKernels(TestCase):
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_csr.py
|
bsr_dense_linear
|
def bsr_dense_linear(input, weights, bias=None):
return torch.nn.functional.linear(input, weights, bias=bias).transpose(-1, -2)
operation = dict(bsr_dense_addmm=bsr_dense_addmm, bsr_dense_mm=bsr_dense_mm, bsr_dense_linear=bsr_dense_linear,
_int_bsr_dense_addmm=_int_bsr_dense_addmm)[op]
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse_csr.py
|
reference
|
def reference(input, mat1, mat2, beta=1, alpha=1, op=op):
assert mat1.layout is torch.strided
assert mat2.layout is torch.strided
if dtype is torch.int8:
if op == '_int_bsr_dense_addmm':
return beta * input + alpha * torch._int_mm(mat1, mat2)
# workaround RuntimeError: "addmm_cuda" not implemented for 'Char'
return beta * input + alpha * torch._int_mm(mat1, mat2).to(torch.int8)
return beta * input + alpha * (mat1 @ mat2)
if op == '_int_bsr_dense_addmm':
# _int_bsr_dense_addmm is same as bsr_dense_addmm except
# with int8 inputs, _int_bsr_dense_addmm returns int32
# result. This is covered by operation and reference
# definitions above and all other definitions below are
# identical between _int_bsr_dense_addmm and
# bsr_dense_addmm.
op = 'bsr_dense_addmm'
|
import torch
import random
import io
import itertools
import unittest
import functools
from contextlib import redirect_stderr
from torch.testing import make_tensor, FileCheck
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, TEST_CUDA_CUDSS, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase,
run_tests, load_tests, coalescedonoff, parametrize, subtest, skipIfTorchDynamo, skipIfRocm, IS_FBCODE, IS_REMOTE_GPU,
suppress_warnings)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
largeTensorTest)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and)
from torch.testing._internal.opinfo.definitions.linalg import sample_inputs_linalg_solve
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED, HIPSPARSE_SPMM_COMPLEX128_SUPPORTED
import operator
import scipy.sparse as sp
import numpy as np
load_tests = load_tests
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
'abs',
'conj_physical',
'deg2rad',
'neg',
'positive',
'frac',
'nn.functional.relu',
'log1p',
'rad2deg'
]
sparse_compressed_indices_methods = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
from functools import partial
import pickle
import re
import re
from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
import warnings
from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
from torch.utils._triton import has_triton
from torch.sparse._triton_ops import tile_to_blocksize
from functools import partial
from torch.sparse._triton_ops import bsr_softmax
from functools import partial
from torch.sparse._triton_ops import bsr_dense_mm
from torch.sparse._triton_ops import bsr_dense_mm
from functools import partial
from torch.sparse._triton_ops import _scaled_dot_product_attention
from functools import partial
from torch.sparse._triton_ops import sampled_addmm, broadcast_batch_dims_bsr
from torch.sparse._triton_ops import scatter_mm
from functools import partial
import triton
from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data
from functools import partial
from torch.sparse._triton_ops import TensorAsKey
from torch.sparse._triton_ops import bsr_dense_addmm, bsr_dense_mm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, get_meta,
optimize_bsr_dense_addmm, dump)
from torch.sparse._triton_ops import bsr_dense_addmm, _int_bsr_dense_addmm
from torch.sparse._triton_ops_meta import (create_blocked_tensor, tune_bsr_dense_addmm, tune__int_bsr_dense_addmm, get_meta)
from torch.sparse._triton_ops import bsr_dense_addmm_meta
from torch.sparse._triton_ops_meta import update as update_bsr_dense_addmm_meta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|