library | test_file | test_function | before | after | context_before | context_after | commit_before | commit_after | change_type
---|---|---|---|---|---|---|---|---|---
torch
|
test/test_pytree.py
|
test_tree_leaves_with_path
|
def test_tree_leaves_with_path(self):
    class ANamedTuple(NamedTuple):
        x: torch.Tensor
        y: int
        z: str

    @dataclass
    class ACustomPytree:
        x: Any
        y: Any
        z: Any

    py_pytree.register_pytree_node(
        ACustomPytree,
        flatten_fn=lambda f: ([f.x, f.y], f.z),
        unflatten_fn=lambda xy, z: ACustomPytree(xy[0], xy[1], z),
        flatten_with_keys_fn=lambda f: ((("x", f.x), ("y", f.y)), f.z),
    )

    SOME_PYTREES = [
        (None,),
        ["hello", [1, 2], {"foo": [(3)]}],
        [ANamedTuple(x=torch.rand(2, 3), y=1, z="foo")],
        [ACustomPytree(x=12, y={"cin": [1, 4, 10], "bar": 18}, z="leaf"), 5],
    ]
    for pytree in SOME_PYTREES:
        flat_out, _ = py_pytree.tree_flatten_with_path(pytree)
        leaves_out = py_pytree.tree_leaves_with_path(pytree)
        self.assertEqual(flat_out, leaves_out)
|
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple
import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_FBCODE,
parametrize,
run_tests,
skipIfTorchDynamo,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
import torch.utils._cxx_pytree as cxx_pytree
GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])
class TestPythonPytree(TestCase):
import json
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
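The row above exercises `register_pytree_node` with a `flatten_with_keys_fn`. Below is a minimal standalone sketch of the same API, assuming a torch build that ships `tree_leaves_with_path` (as the commits above do); the `Pair` class is illustrative, not from the dataset.

```python
import torch
import torch.utils._pytree as py_pytree

class Pair:  # hypothetical container, used only for illustration
    def __init__(self, a, b):
        self.a = a
        self.b = b

py_pytree.register_pytree_node(
    Pair,
    flatten_fn=lambda p: ([p.a, p.b], None),             # children, context
    unflatten_fn=lambda children, ctx: Pair(*children),
    flatten_with_keys_fn=lambda p: ((("a", p.a), ("b", p.b)), None),
)

tree = {"x": Pair(torch.zeros(2), 1)}
for key_path, leaf in py_pytree.tree_leaves_with_path(tree):
    print(key_path, leaf)  # every leaf comes paired with the path reaching it
```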
torch
|
test/test_pytree.py
|
test_flatten_flatten_with_key_consistency
|
def test_flatten_flatten_with_key_consistency(self):
    """Check that flatten and flatten_with_key produce consistent leaves/context."""
    reg = py_pytree.SUPPORTED_NODES
    EXAMPLE_TREE = {
        list: [1, 2, 3],
        tuple: (1, 2, 3),
        dict: {"foo": 1, "bar": 2},
        namedtuple: collections.namedtuple("ANamedTuple", ["x", "y"])(1, 2),
        OrderedDict: OrderedDict([("foo", 1), ("bar", 2)]),
        defaultdict: defaultdict(int, {"foo": 1, "bar": 2}),
        deque: deque([1, 2, 3]),
        torch.Size: torch.Size([1, 2, 3]),
        immutable_dict: immutable_dict({"foo": 1, "bar": 2}),
        immutable_list: immutable_list([1, 2, 3]),
    }
    for typ in reg:
        example = EXAMPLE_TREE.get(typ)
        if example is None:
            continue
        flat_with_path, spec1 = py_pytree.tree_flatten_with_path(example)
        flat, spec2 = py_pytree.tree_flatten(example)
        self.assertEqual(flat, [x[1] for x in flat_with_path])
        self.assertEqual(spec1, spec2)
|
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple
import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_FBCODE,
parametrize,
run_tests,
skipIfTorchDynamo,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
import torch.utils._cxx_pytree as cxx_pytree
GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])
class TestPythonPytree(TestCase):
import json
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_pytree.py
|
test_key_access
|
def test_key_access(self):
    class ANamedTuple(NamedTuple):
        x: str
        y: int

    tree = (["hello", [1, 2], {"foo": [(3)], "bar": [ANamedTuple(x="baz", y=10)]}],)
    flat, _ = py_pytree.tree_flatten_with_path(tree)
    for kp, val in flat:
        self.assertEqual(py_pytree.key_get(tree, kp), val)
|
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple
import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_FBCODE,
parametrize,
run_tests,
skipIfTorchDynamo,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
import torch.utils._cxx_pytree as cxx_pytree
GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])
class TestPythonPytree(TestCase):
import json
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
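The key-path round trip checked by `test_key_access`, in standalone form; a hedged sketch assuming the same `py_pytree` helpers as the row above.

```python
import torch.utils._pytree as py_pytree

tree = (["hello", [1, 2], {"foo": [3]}],)
flat, _spec = py_pytree.tree_flatten_with_path(tree)
for kp, val in flat:
    # key_get walks the tree along kp and must land on the flattened value
    assert py_pytree.key_get(tree, kp) == val
```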
torch
|
test/test_pytree.py
|
setUp
|
def setUp(self):
    if IS_FBCODE:
        raise unittest.SkipTest("C++ pytree tests are not supported in fbcode")
|
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple
import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_FBCODE,
parametrize,
run_tests,
skipIfTorchDynamo,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
import torch.utils._cxx_pytree as cxx_pytree
GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])
import json
class TestCxxPytree(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_pytree.py
|
test_pytree_serialize
|
def test_pytree_serialize(self, spec):
    # Ensure that the spec is valid
    self.assertEqual(
        spec,
        py_pytree.tree_structure(
            py_pytree.tree_unflatten([0] * spec.num_leaves, spec)
        ),
    )
    serialized_spec = py_pytree.treespec_dumps(spec)
    self.assertIsInstance(serialized_spec, str)
    self.assertEqual(spec, py_pytree.treespec_loads(serialized_spec))
|
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple
import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_FBCODE,
parametrize,
run_tests,
skipIfTorchDynamo,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
import torch.utils._cxx_pytree as cxx_pytree
GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])
class TestPythonPytree(TestCase):
import json
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
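A small sketch of the serialization contract `test_pytree_serialize` relies on: `treespec_dumps` returns a plain JSON string and `treespec_loads` restores an equal spec. It assumes only the public `py_pytree` helpers shown above.

```python
import torch.utils._pytree as py_pytree

_, spec = py_pytree.tree_flatten({"a": [1, 2], "b": (3,)})

s = py_pytree.treespec_dumps(spec)          # a str, safe to persist
assert isinstance(s, str)
assert py_pytree.treespec_loads(s) == spec

# placeholder leaves reconstruct the same structure, as the test asserts
rebuilt = py_pytree.tree_unflatten([0] * spec.num_leaves, spec)
assert py_pytree.tree_structure(rebuilt) == spec
```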
torch
|
test/test_pytree.py
|
test_pytree_serialize_namedtuple
|
def test_pytree_serialize_namedtuple(self):
    Point1 = namedtuple("Point1", ["x", "y"])
    py_pytree._register_namedtuple(
        Point1,
        serialized_type_name="test_pytree.test_pytree_serialize_namedtuple.Point1",
    )
    spec = py_pytree.TreeSpec(
        namedtuple, Point1, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )
    roundtrip_spec = py_pytree.treespec_loads(py_pytree.treespec_dumps(spec))
    self.assertEqual(spec, roundtrip_spec)

    class Point2(NamedTuple):
        x: int
        y: int

    py_pytree._register_namedtuple(
        Point2,
        serialized_type_name="test_pytree.test_pytree_serialize_namedtuple.Point2",
    )
    spec = py_pytree.TreeSpec(
        namedtuple, Point2, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )
    roundtrip_spec = py_pytree.treespec_loads(py_pytree.treespec_dumps(spec))
    self.assertEqual(spec, roundtrip_spec)
|
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple
import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_FBCODE,
parametrize,
run_tests,
skipIfTorchDynamo,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
import torch.utils._cxx_pytree as cxx_pytree
GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])
class TestPythonPytree(TestCase):
import json
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_pytree.py
|
test_pytree_custom_type_serialize
|
def test_pytree_custom_type_serialize(self):
    class DummyType:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    py_pytree.register_pytree_node(
        DummyType,
        lambda dummy: ([dummy.x, dummy.y], None),
        lambda xs, _: DummyType(*xs),
        serialized_type_name="test_pytree_custom_type_serialize.DummyType",
        to_dumpable_context=lambda context: "moo",
        from_dumpable_context=lambda dumpable_context: None,
    )
    spec = py_pytree.TreeSpec(
        DummyType, None, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )
    serialized_spec = py_pytree.treespec_dumps(spec, 1)
    self.assertIn("moo", serialized_spec)
    roundtrip_spec = py_pytree.treespec_loads(serialized_spec)
    self.assertEqual(roundtrip_spec, spec)
|
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple
import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_FBCODE,
parametrize,
run_tests,
skipIfTorchDynamo,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
import torch.utils._cxx_pytree as cxx_pytree
GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])
class TestPythonPytree(TestCase):
import json
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
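The `to_dumpable_context` / `from_dumpable_context` pair above controls how a node's context is embedded in the serialized spec. A hedged sketch with a JSON-friendly context; the `Box` class is made up for illustration.

```python
import torch.utils._pytree as py_pytree

class Box:  # hypothetical container
    def __init__(self, payload):
        self.payload = payload

py_pytree.register_pytree_node(
    Box,
    lambda box: ([box.payload], "ctx"),            # flatten: children, context
    lambda children, ctx: Box(children[0]),        # unflatten
    serialized_type_name="example.Box",
    to_dumpable_context=lambda ctx: {"tag": ctx},  # must be JSON-serializable
    from_dumpable_context=lambda dumped: dumped["tag"],
)

_, spec = py_pytree.tree_flatten(Box(1))
assert py_pytree.treespec_loads(py_pytree.treespec_dumps(spec)) == spec
```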
torch
|
test/test_pytree.py
|
__init__
|
instantiate_parametrized_tests(TestPytree)
if __name__ == '__main__':
    run_tests()
|
def __init__(self, x, y):
    self.x = x
    self.y = y
|
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple
import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_FBCODE,
parametrize,
run_tests,
skipIfTorchDynamo,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
import torch.utils._cxx_pytree as cxx_pytree
GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])
class GlobalDummyType:
import json
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_reductions.py
|
_amin_wrapper
|
def _amin_wrapper(x, dim=None, keepdims=False):
    with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
        if dim is None:
            return torch._aminmax(x)[0]
        else:
            return torch._aminmax(x, dim, keepdims)[0]
|
def _amin_wrapper(x, dim=None, keepdims=False):
    return torch.aminmax(x, dim=dim, keepdim=keepdims)[0]
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
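This row records the migration from the deprecated private `torch._aminmax` to the public `torch.aminmax`. A quick sketch of the replacement API, with the expected values worked out by hand:

```python
import torch

x = torch.arange(6.0).reshape(2, 3)

mn, mx = torch.aminmax(x)                 # full reduction: 0-d min and max
assert mn.item() == 0.0 and mx.item() == 5.0

per_row = torch.aminmax(x, dim=1)         # named (min, max) tuple along dim 1
assert torch.equal(per_row.min, torch.tensor([0.0, 3.0]))
assert torch.equal(per_row.max, torch.tensor([2.0, 5.0]))
```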
torch
|
test/test_reductions.py
|
test_invalid_0dim_aminmax
|
# TODO: bincount isn't a classic reduction -- maybe this test suite is
# reductions and summary ops?
|
def test_invalid_0dim_aminmax(self, device, dtype):
    with self.assertRaisesRegex(RuntimeError, 'not implemented'):
        torch.aminmax(torch.tensor(1., dtype=dtype, device=device), dim=0)

# TODO: bincount isn't a classic reduction -- maybe this test suite is
# reductions and summary ops?
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
class TestReductions(TestCase):
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_reductions.py
|
create_noncontig_inputs
|
def create_noncontig_inputs(x, ndim):
    if ndim == 2:
        return x[::2, ::2]
    elif ndim == 3:
        return x[::2, ::2, ::2]
    elif ndim == 4:
        return x[::2, ::2, ::2, ::2]
    elif ndim == 5:
        return x[::2, ::2, ::2, ::2, ::2]
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_reductions.py
|
normfn_attr
|
def normfn_attr(t, dim, keepdim=False, out=None):
    attr = torch.norm
    return attr(t, 2, dim, keepdim, out=out)

for fn_name in dim_red_fns:
    fn_attr = getattr(torch, fn_name) if fn_name != "norm" else normfn_attr

    def fn(x, dim, keepdim=False, out=None):
        ans = fn_attr(x, dim, keepdim=keepdim, out=out)
        return ans if not isinstance(ans, tuple) else ans[0]

    def fn_tuple(x, dim, keepdim=False, out=None):
        return fn_attr(x, dim, keepdim=keepdim, out=out)

    def test_multidim(x, dim):
        self.assertEqual(fn(x, dim).unsqueeze(dim), fn(x, dim, keepdim=True))
        self.assertEqual(x.ndimension() - 1, fn(x, dim).ndimension())
        self.assertEqual(x.ndimension(), fn(x, dim, keepdim=True).ndimension())

    # general case
    x = torch.randn(3, 4, 5, device=device)
    dim = random.randint(0, 2)
    test_multidim(x, dim)

    # check 1-d behavior
    x = torch.randn(1, device=device)
    dim = 0
    self.assertEqual(fn(x, dim).shape, ())
    self.assertEqual(fn(x, dim, keepdim=True).shape, (1,))

    # check reducing of a singleton dimension
    dims = [3, 4, 5]
    singleton_dim = random.randint(0, 2)
    dims[singleton_dim] = 1
    x = torch.randn(dims, device=device)
    test_multidim(x, singleton_dim)

    # check reducing with output kwargs
    if fn_name in ['median', 'nanmedian', 'mode', 'max', 'min']:
        y = torch.randn(5, 3, device=device)
        values = torch.randn(5, 3, device=device)
        indices = torch.zeros(5, 3, device=device).long() - 1
        fn_tuple(y, 1, keepdim=False, out=(values[:, 1], indices[:, 1]))
        values_expected, indices_expected = fn_tuple(y, 1, keepdim=False)
        self.assertEqual(values[:, 1], values_expected,
                         msg='{} values with out= kwarg'.format(fn_name))
        self.assertEqual(indices[:, 1], indices_expected,
                         msg='{} indices with out= kwarg'.format(fn_name))
        continue
    x = torch.randn(5, 3, device=device)
    y = torch.randn(5, 3, device=device)
    fn(y, 1, keepdim=False, out=x[:, 1])
    expected = fn(y, 1, keepdim=False)
    self.assertEqual(x[:, 1], expected, msg='{} with out= kwarg'.format(fn_name))
|
def normfn_attr(t, dim, keepdim=False, out=None):
    attr = torch.norm
    return attr(t, 2, dim, keepdim, out=out)

fn_attr = getattr(torch, fn_name) if fn_name != "norm" else normfn_attr
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_reductions.py
|
test_multidim
|
def test_multidim(x, dim):
    self.assertEqual(fn(x, dim).unsqueeze(dim), fn(x, dim, keepdim=True))
    self.assertEqual(x.ndimension() - 1, fn(x, dim).ndimension())
    self.assertEqual(x.ndimension(), fn(x, dim, keepdim=True).ndimension())

# general case
x = torch.randn(3, 4, 5, device=device)
dim = random.randint(0, 2)
test_multidim(x, dim)
# check 1-d behavior
x = torch.randn(1, device=device)
dim = 0
self.assertEqual(fn(x, dim).shape, ())
self.assertEqual(fn(x, dim, keepdim=True).shape, (1,))
# check reducing of a singleton dimension
dims = [3, 4, 5]
singleton_dim = random.randint(0, 2)
dims[singleton_dim] = 1
x = torch.randn(dims, device=device)
test_multidim(x, singleton_dim)
# check reducing with output kwargs
if fn_name in ['median', 'nanmedian', 'mode', 'max', 'min']:
    y = torch.randn(5, 3, device=device)
    values = torch.randn(5, 3, device=device)
    indices = torch.zeros(5, 3, device=device).long() - 1
    fn_tuple(y, 1, keepdim=False, out=(values[:, 1], indices[:, 1]))
    values_expected, indices_expected = fn_tuple(y, 1, keepdim=False)
    self.assertEqual(values[:, 1], values_expected,
                     msg='{} values with out= kwarg'.format(fn_name))
    self.assertEqual(indices[:, 1], indices_expected,
                     msg='{} indices with out= kwarg'.format(fn_name))
    continue
x = torch.randn(5, 3, device=device)
y = torch.randn(5, 3, device=device)
fn(y, 1, keepdim=False, out=x[:, 1])
expected = fn(y, 1, keepdim=False)
self.assertEqual(x[:, 1], expected, msg='{} with out= kwarg'.format(fn_name))
|
def test_multidim(x, dim):
    self.assertEqual(fn(x, dim).unsqueeze(dim), fn(x, dim, keepdim=True))
    self.assertEqual(x.ndimension() - 1, fn(x, dim).ndimension())
    self.assertEqual(x.ndimension(), fn(x, dim, keepdim=True).ndimension())

# general case
x = torch.randn(3, 4, 5, device=device)
dim = random.randint(0, 2)
test_multidim(x, dim)
# check 1-d behavior
x = torch.randn(1, device=device)
dim = 0
self.assertEqual(fn(x, dim).shape, ())
self.assertEqual(fn(x, dim, keepdim=True).shape, (1,))
# check reducing of a singleton dimension
dims = [3, 4, 5]
singleton_dim = random.randint(0, 2)
dims[singleton_dim] = 1
x = torch.randn(dims, device=device)
test_multidim(x, singleton_dim)
# check reducing with output kwargs
if fn_name in ['median', 'nanmedian', 'mode', 'max', 'min']:
    y = torch.randn(5, 3, device=device)
    values = torch.randn(5, 3, device=device)
    indices = torch.zeros(5, 3, device=device).long() - 1
    fn_tuple(y, 1, keepdim=False, out=(values[:, 1], indices[:, 1]))
    values_expected, indices_expected = fn_tuple(y, 1, keepdim=False)
    self.assertEqual(values[:, 1], values_expected,
                     msg=f'{fn_name} values with out= kwarg')
    self.assertEqual(indices[:, 1], indices_expected,
                     msg=f'{fn_name} indices with out= kwarg')
    return
x = torch.randn(5, 3, device=device)
y = torch.randn(5, 3, device=device)
fn(y, 1, keepdim=False, out=x[:, 1])
expected = fn(y, 1, keepdim=False)
self.assertEqual(x[:, 1], expected, msg=f'{fn_name} with out= kwarg')
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_reductions.py
|
test_prod_lowp
|
def test_prod_lowp(self, device, dtype):
    x = torch.rand(100, 100, dtype=dtype, device=device)
    x_ref = x.float()
    res1 = torch.prod(x, 1)
    res2 = torch.prod(x_ref, 1)
    self.assertEqual(res1, res2.to(dtype=dtype))
    res1 = torch.prod(x, 0)
    res2 = torch.prod(x_ref, 0)
    self.assertEqual(res1, res2.to(dtype=dtype))
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
class TestReductions(TestCase):
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
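A compact sketch of the accuracy pattern `test_prod_lowp` uses, mirroring the row above: reduce in a low-precision dtype, recompute in float32 as a reference, and compare after casting the reference back down. Tolerances are the dtype defaults of `assert_close`.

```python
import torch

x = torch.rand(100, 100, dtype=torch.bfloat16)
ref = torch.prod(x.float(), dim=1).to(torch.bfloat16)   # float32 reference
torch.testing.assert_close(torch.prod(x, dim=1), ref)
```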
torch
|
test/test_reductions.py
|
test_output_dtype
|
def test_output_dtype(dtype, is_int32):
    output = values_1d.to(dtype)
    with self.assertRaisesRegex(
            RuntimeError, "output tensor's dtype is wrong"):
        torch.searchsorted(values_1d, values_1d, out=output, out_int32=is_int32)

test_output_dtype(torch.float32, False)
test_output_dtype(torch.int32, False)
test_output_dtype(torch.int64, True)
# invalid side argument
with self.assertRaisesRegex(RuntimeError, "side can only be 'left' or 'right'"):
    torch.searchsorted(values_1d, values_1d, side='bad')
# invalid sorter argument, wrong size
with self.assertRaisesRegex(RuntimeError, "boundary and sorter must have the same size"):
    sequence = torch.rand_like(values_1d, dtype=torch.float)
    _, sorted_idx = torch.sort(sequence)
    torch.searchsorted(sequence, values_1d, sorter=sorted_idx[:-1])
# invalid sorter argument, is not dtype long
with self.assertRaisesRegex(RuntimeError, "sorter must be a tensor of long dtype"):
    sequence = torch.rand_like(values_1d, dtype=torch.float)
    _, sorted_idx = torch.sort(sequence)
    torch.searchsorted(sequence, values_1d, sorter=sorted_idx.to(torch.float32))
# scalar type bfloat16
if self.device_type == 'cpu':
    def test_dtype_bfloat16(values_bf16=False, boundaries_bf16=False):
        values_1d_float = values_1d.to(torch.float32)
        boundaries = torch.tensor([0.9, 1, 2, 2, 3, 3, 4, 4.1, 9, 9], device=device, dtype=torch.float32)
        if values_bf16:
            values_1d_float = values_1d_float.to(torch.bfloat16)
        if boundaries_bf16:
            boundaries = boundaries.to(torch.bfloat16)
        expected_result = torch.tensor([1, 2, 4, 6, 8, 8, 8, 8, 8], device=device, dtype=torch.int32)
        self.assertEqual(torch.bucketize(values_1d_float, boundaries, out_int32=True), expected_result)

    test_dtype_bfloat16(True, False)
    test_dtype_bfloat16(False, True)
    test_dtype_bfloat16(True, True)
|
def test_output_dtype(dtype, is_int32):
    output = values_1d.to(dtype)
    with self.assertRaisesRegex(
            RuntimeError, "output tensor's dtype is wrong"):
        torch.searchsorted(values_1d, values_1d, out=output, out_int32=is_int32)

test_output_dtype(torch.float32, False)
test_output_dtype(torch.int32, False)
test_output_dtype(torch.int64, True)
# invalid side argument
with self.assertRaisesRegex(RuntimeError, "side can only be 'left' or 'right'"):
    torch.searchsorted(values_1d, values_1d, side='bad')
# invalid sorter argument, wrong size
with self.assertRaisesRegex(RuntimeError, "boundary and sorter must have the same size"):
    sequence = torch.rand_like(values_1d, dtype=torch.float)
    _, sorted_idx = torch.sort(sequence)
    torch.searchsorted(sequence, values_1d, sorter=sorted_idx[:-1])
# invalid sorter argument, is not dtype long
with self.assertRaisesRegex(RuntimeError, "sorter must be a tensor of long dtype"):
    sequence = torch.rand_like(values_1d, dtype=torch.float)
    _, sorted_idx = torch.sort(sequence)
    torch.searchsorted(sequence, values_1d, sorter=sorted_idx.to(torch.float32))
# invalid sorter value, out of bound (>= innermost size)
with self.assertRaisesRegex(RuntimeError, "sorter index out of range"):
    torch.searchsorted(torch.tensor([1, 2, 3]), 2.5, sorter=torch.tensor([0, 1, 3]))
# invalid sorter value, out of bound (< 0)
with self.assertRaisesRegex(RuntimeError, "sorter index out of range"):
    torch.searchsorted(torch.tensor([1, 2, 3]), 2.5, sorter=torch.tensor([-1, 1, 2]))
# scalar type bfloat16
if self.device_type == 'cpu':
    def test_dtype_bfloat16(values_bf16=False, boundaries_bf16=False):
        values_1d_float = values_1d.to(torch.float32)
        boundaries = torch.tensor([0.9, 1, 2, 2, 3, 3, 4, 4.1, 9, 9], device=device, dtype=torch.float32)
        if values_bf16:
            values_1d_float = values_1d_float.to(torch.bfloat16)
        if boundaries_bf16:
            boundaries = boundaries.to(torch.bfloat16)
        expected_result = torch.tensor([1, 2, 4, 6, 8, 8, 8, 8, 8], device=device, dtype=torch.int32)
        self.assertEqual(torch.bucketize(values_1d_float, boundaries, out_int32=True), expected_result)

    test_dtype_bfloat16(True, False)
    test_dtype_bfloat16(False, True)
    test_dtype_bfloat16(True, True)
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
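The diff above adds out-of-range `sorter` validation to `torch.searchsorted`. A hedged sketch of the sorter contract: a long tensor of in-range indices that sorts the boundary tensor; the error message is the one the updated test expects on builds that include this check.

```python
import torch

boundaries = torch.tensor([3.0, 1.0, 2.0])          # unsorted on purpose
sorter = torch.argsort(boundaries)                  # valid permutation, long dtype
print(torch.searchsorted(boundaries, 2.5, sorter=sorter))  # tensor(2)

try:
    torch.searchsorted(torch.tensor([1, 2, 3]), 2.5,
                       sorter=torch.tensor([0, 1, 3]))   # 3 >= innermost size
except RuntimeError as e:
    print(e)  # "sorter index out of range" once the check above landed
```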
torch
|
test/test_reductions.py
|
_get_relaxed_tolerances_for
|
def _get_relaxed_tolerances_for(self, dtype):
    if dtype == torch.float16:
        atol = 0.4
        rtol = 1e-2
    elif dtype == torch.float32:
        atol = 7e-05
        rtol = 3e-06
    else:
        # Default values
        atol = None
        rtol = None
    return atol, rtol
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
class TestReductions(TestCase):
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_reductions.py
|
is_integral
|
def is_integral(dtype):
    return dtype in integral_types()

# On Windows CI, the current version of `numpy` promotes all lower integers
# dtypes to int32 while `torch` promotes them to int64. Hence we skip on checking
# the exact dtype.
# Reference : https://dr.pytorch.org/api/view-log-full?build_id=122051580
# PR : https://github.com/pytorch/pytorch/pull/38628#issuecomment-655905370
exact_dtype = False if (IS_WINDOWS and is_integral(dtype)) else True
if dtype == torch.uint8:
    with self.assertRaises(TypeError):
        self._test_reduction_function_with_numpy(torch_fn, np_fn, device, dtype, with_extremal=with_extremal)
else:
    # TODO: Investigate why the output is not close to numpy.
    if dtype == torch.float16:
        atol = 0.4
        rtol = 1e-2
    elif dtype == torch.float32:
        atol = 7e-05
        rtol = 3e-06
    else:
        # Default values
        atol = None
        rtol = None
    self._test_reduction_function_with_numpy(torch_fn, np_fn, device, dtype,
                                             atol=atol, rtol=rtol, exact_dtype=exact_dtype,
                                             with_keepdim=with_keepdim, with_extremal=with_extremal)
|
def is_integral(dtype):
    return dtype in integral_types()

exact_dtype = True
# On Windows CI, the current version of `numpy` promotes all lower integers
# dtypes to int32 while `torch` promotes them to int64. Hence we skip on checking
# the exact dtype.
# Reference : https://dr.pytorch.org/api/view-log-full?build_id=122051580
# PR : https://github.com/pytorch/pytorch/pull/38628#issuecomment-655905370
if IS_WINDOWS and is_integral(dtype):
    exact_dtype = False
# For uint8, numpy promotes to uint64 while torch promotes to int64.
# So we must skip this as well.
if dtype == torch.uint8:
    exact_dtype = False
# TODO: Investigate why the output is not close to numpy.
atol, rtol = self._get_relaxed_tolerances_for(dtype)
self._test_reduction_function_with_numpy(torch_fn, np_fn, device, dtype,
                                         atol=atol, rtol=rtol, exact_dtype=exact_dtype,
                                         with_keepdim=with_keepdim, with_extremal=with_extremal)
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_reductions.py
|
to_numpy
|
def to_numpy(input):
    if input.dtype is torch.bfloat16:
        return input.cpu().to(torch.float32).numpy()
    else:
        return input.cpu().numpy()

samples = op.sample_inputs_func(op, device, dtype, requires_grad=False)
for sample_input in samples:
    t = sample_input.input
    actual = op(t, *sample_input.args, **sample_input.kwargs)
    exact_dtype = not (t.dtype is torch.bfloat16
                       or (op.promotes_int_to_float and not torch.is_floating_point(t)))
    expected = op.ref(to_numpy(t), *sample_input.args,
                      **dict(
                          # `identity` is mapped to numpy reduction `initial` argument
                          identity=torch.masked._reduction_identity(op.name, t),
                          **sample_input.kwargs))
    # Workaround https://github.com/pytorch/pytorch/issues/66556
    expected = np.asarray(expected)  # transform numpy scalars to numpy.ndarray instances
    msg = ("Failed to produce expected results! Input tensor was"
           " {0}, torch result is {1}, and reference result is"
           " {2}.").format(t, actual, expected) if t.numel() < 10 else None
    self.assertEqual(actual, expected, msg, exact_dtype=exact_dtype)
|
def to_numpy(input):
    if input.dtype is torch.bfloat16:
        return input.cpu().to(torch.float32).numpy()
    else:
        return input.cpu().numpy()

samples = op.sample_inputs_func(op, device, dtype, requires_grad=False)
for sample_input in samples:
    t = sample_input.input
    actual = op(t, *sample_input.args, **sample_input.kwargs)
    exact_dtype = not (t.dtype is torch.bfloat16
                       or (op.promotes_int_to_float and not torch.is_floating_point(t)))
    expected = op.ref(to_numpy(t), *sample_input.args,
                      **dict(
                          # `identity` is mapped to numpy reduction `initial` argument
                          identity=torch.masked._reduction_identity(op.name, t),
                          **sample_input.kwargs))
    # Workaround https://github.com/pytorch/pytorch/issues/66556
    expected = np.asarray(expected)  # transform numpy scalars to numpy.ndarray instances
    # Numpy differs, producing uint32 on Windows
    if expected.dtype in [np.uint64, np.uint32]:
        exact_dtype = False
    msg = ("Failed to produce expected results! Input tensor was"
           f" {t}, torch result is {actual}, and reference result is"
           f" {expected}.") if t.numel() < 10 else None
    self.assertEqual(actual, expected, msg, exact_dtype=exact_dtype)
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_scatter_gather_ops.py
|
test_gather_expanded_index
|
def test_gather_expanded_index(self, device, dtype):
    def helper(input_size, idx_size):
        input = torch.randn(input_size, device=device).to(dtype=dtype)
        input2 = input.clone()
        shape = [1] * len(input_size)
        shape[0] = idx_size
        dim_size = input_size[0]
        idx = torch.randint(0, dim_size, shape)
        # Test the fast path on gather when index is expanded
        expanded_shape = input_size
        expanded_shape[0] = idx_size
        idx = idx.expand(expanded_shape)
        idx2 = idx.contiguous()
        out = torch.gather(input, 0, idx)
        out2 = torch.gather(input2, 0, idx2)
        self.assertEqual(out, out2)

    helper([50, 17], 100)
    helper([50, 1], 100)
    helper([50, 8, 7], 100)
    helper([50, 3, 4, 5], 100)

# Generic Device Test Framework instantiation, see
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# for details.
|
def test_gather_expanded_index(self, device, dtype):
    # Test when index is [N, 1], which would have stride [1, 0];
    # it should be excluded from the fast path when index is expanded
    input = torch.arange(25).view(5, 5)
    input2 = input.to(dtype=dtype)
    idx = torch.arange(5).view(5, 1)
    out = torch.gather(input, 0, idx)
    out2 = torch.gather(input2, 0, idx)
    self.assertEqual(out.to(dtype=dtype), out2)

    def helper(input_size, idx_size):
        input = torch.randn(input_size, device=device).to(dtype=dtype)
        input2 = input.clone()
        shape = [1] * len(input_size)
        shape[0] = idx_size
        dim_size = input_size[0]
        idx = torch.randint(0, dim_size, shape)
        # Test the fast path on gather when index is expanded
        expanded_shape = input_size
        expanded_shape[0] = idx_size
        idx = idx.expand(expanded_shape)
        idx2 = idx.contiguous()
        out = torch.gather(input, 0, idx)
        out2 = torch.gather(input2, 0, idx2)
        self.assertEqual(out, out2)

    helper([50, 17], 100)
    helper([50, 1], 100)
    helper([50, 8, 7], 100)
    helper([50, 3, 4, 5], 100)

# Generic Device Test Framework instantiation, see
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# for details.
|
import random
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(parametrize, run_tests, TestCase, DeterministicGuard)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA,
toleranceOverride, tol,)
from torch.testing._internal.common_dtype import \
(get_all_dtypes,)
class TestScatterGather(TestCase):
|
import random
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(parametrize, run_tests, TestCase, DeterministicGuard)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA,
toleranceOverride, tol,)
from torch.testing._internal.common_dtype import \
(get_all_dtypes,)
class TestScatterGather(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
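A standalone sketch of the fast-path invariant `test_gather_expanded_index` checks: gathering with an index expanded along a size-1 dim (stride 0) must match gathering with its contiguous copy.

```python
import torch

src = torch.randn(50, 17)
idx = torch.randint(0, 50, (100, 1)).expand(100, 17)   # strides (1, 0)
assert torch.equal(torch.gather(src, 0, idx),
                   torch.gather(src, 0, idx.contiguous()))
```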
torch
|
test/test_schema_check.py
|
secretly_aliasing
|
def secretly_aliasing(x):
    return x.view(-1)
|
import os
import sys
import torch
from torch.utils._pytree import tree_map
import unittest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_TORCHDYNAMO
from torch.fx.operator_schemas import normalize_function
from torch._subclasses.schema_check_mode import SchemaCheckMode
from torch.utils._python_dispatch import TorchDispatchMode
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_device_type import ops, OpDTypes, instantiate_device_type_tests
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
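For context, a hedged sketch of `SchemaCheckMode` in the benign case: it is a `TorchDispatchMode` that checks each dispatched op against its schema, and schema-conforming ops pass through silently (the failing aliasing case appears in the `f` row further down).

```python
import torch
from torch._subclasses.schema_check_mode import SchemaCheckMode

x = torch.rand(3, 3)
with SchemaCheckMode():
    y = x.add(1).relu()   # well-behaved ops raise nothing under the mode
print(y.shape)
```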
torch
|
test/test_schema_check.py
|
secretly_mutating
|
def secretly_mutating(x):
    x.mul_(2)
    return x * 3
|
import os
import sys
import torch
from torch.utils._pytree import tree_map
import unittest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_TORCHDYNAMO
from torch.fx.operator_schemas import normalize_function
from torch._subclasses.schema_check_mode import SchemaCheckMode
from torch.utils._python_dispatch import TorchDispatchMode
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_device_type import ops, OpDTypes, instantiate_device_type_tests
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_schema_check.py
|
setUp
|
# Tests that SchemaCheckMode records operator order with grad
|
def setUp(self):
    if TEST_WITH_TORCHDYNAMO:
        self.skipTest("SchemaCheckMode is ignored by dynamo")
    super().setUp()

# Tests that SchemaCheckMode records operator order with grad
|
import os
import sys
import torch
from torch.utils._pytree import tree_map
import unittest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_TORCHDYNAMO
from torch.fx.operator_schemas import normalize_function
from torch._subclasses.schema_check_mode import SchemaCheckMode
from torch.utils._python_dispatch import TorchDispatchMode
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_device_type import ops, OpDTypes, instantiate_device_type_tests
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
custom_lib = torch.library.Library("bad_schemas", "DEF") # noqa: TOR901
custom_lib_cpu = torch.library.Library("bad_schemas", "IMPL", "CPU") # noqa: TOR901
custom_lib_meta = torch.library.Library("bad_schemas", "IMPL", "Meta") # noqa: TOR901
class TestSchemaCheck(JitTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_reductions.py
|
_assert_warning
|
def _assert_warning(_func, _tensor, _correction):
with warnings.catch_warnings(record=True) as w:
_func(_tensor, dim=-1, correction=_correction)
self.assertIn('degrees of freedom is <= 0', str(w[0].message))
correction = 20
size = (10, correction)
tensor = make_tensor(size, dtype=dtype, device=device)
for f in [torch.std, torch.var, torch.var_mean, torch.std_mean]:
_assert_warning(f, tensor, correction)
|
import contextlib
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
parametrize,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, ReductionPythonRefInfo, reduction_ops, reference_masked_ops)
from scipy.special import logsumexp
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_schema_check.py
|
f
|
def f(x):
return torch.ops.bad_schemas.secretly_aliasing(x)
x = torch.rand((3, 3))
with self.assertRaisesRegex(RuntimeError, "not defined to alias output but was aliasing"):
with SchemaCheckMode() as s:
out = f(x)
|
import os
import sys
import torch
from torch.utils._pytree import tree_map
import unittest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_TORCHDYNAMO
from torch.fx.operator_schemas import normalize_function
from torch._subclasses.schema_check_mode import SchemaCheckMode
from torch.utils._python_dispatch import TorchDispatchMode
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_device_type import ops, OpDTypes, instantiate_device_type_tests
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
custom_lib = torch.library.Library("bad_schemas", "DEF") # noqa: TOR901
custom_lib_cpu = torch.library.Library("bad_schemas", "IMPL", "CPU") # noqa: TOR901
custom_lib_meta = torch.library.Library("bad_schemas", "IMPL", "Meta") # noqa: TOR901
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_schema_check.py
|
f
|
# Tests that is_alias_of returns as expected
|
def f(x):
return torch.ops.bad_schemas.secretly_aliasing(x)
x = torch.rand((3, 3))
with self.assertRaisesRegex(RuntimeError, "not defined to alias output but was aliasing"):
with SchemaCheckMode() as s:
out = f(x)
|
import os
import sys
import torch
from torch.utils._pytree import tree_map
import unittest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_TORCHDYNAMO
from torch.fx.operator_schemas import normalize_function
from torch._subclasses.schema_check_mode import SchemaCheckMode
from torch.utils._python_dispatch import TorchDispatchMode
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_device_type import ops, OpDTypes, instantiate_device_type_tests
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
custom_lib = torch.library.Library("bad_schemas", "DEF") # noqa: TOR901
custom_lib_cpu = torch.library.Library("bad_schemas", "IMPL", "CPU") # noqa: TOR901
custom_lib_meta = torch.library.Library("bad_schemas", "IMPL", "Meta") # noqa: TOR901
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_segment_reductions.py
|
get_default_value
|
def get_default_value(initial_value, reduction):
if initial_value is not None:
return initial_value
if reduction == "max":
return -float("Inf")
elif reduction == "mean":
return float("nan")
elif reduction == "min":
return float("Inf")
elif reduction == "sum":
return 0.0
elif reduction == "prod":
return 1.0
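# Illustrative examples (not part of the recorded change):
#   get_default_value(None, "sum") -> 0.0
#   get_default_value(None, "prod") -> 1.0
#   get_default_value(5, "max") -> 5   # an explicit initial is passed through unchanged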
class TestSegmentReductions(TestCase):
def _test_common(
self,
reduction,
device,
dtype,
unsafe,
axis,
initial_value,
data_arr,
lengths_arr,
expected_arr,
expected_grad_arr,
check_backward,
lengths_dtype=torch.int,
):
lengths = torch.tensor(lengths_arr, device=device, dtype=lengths_dtype)
# generate offsets from lengths
zeros_shape = list(lengths.shape)
zeros_shape[-1] = 1
offsets = torch.cat((lengths.new_zeros(zeros_shape), lengths), -1).cumsum_(-1)
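# e.g. 1-D lengths [1, 2, 3, 0] -> offsets [0, 1, 3, 6, 6]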
data = torch.tensor(
data_arr,
device=device,
dtype=dtype,
requires_grad=True,
)
expected_result = torch.tensor(expected_arr, device=device, dtype=dtype)
expected_grad = torch.tensor(expected_grad_arr, device=device, dtype=dtype)
for mode in ['lengths', 'offsets']:
segment_reduce_kwargs = dict(
axis=axis,
unsafe=unsafe,
initial=initial_value)
if (mode == 'lengths'):
segment_reduce_kwargs['lengths'] = lengths
else:
segment_reduce_kwargs['offsets'] = offsets
actual_result = torch._segment_reduce(
data=data,
reduce=reduction,
**segment_reduce_kwargs
)
self.assertEqual(
expected_result, actual_result, rtol=1e-02, atol=1e-05, equal_nan=True
)
if not check_backward:
return
# Test backward
actual_result.sum().backward()
self.assertEqual(
expected_grad, data.grad, rtol=1e-02, atol=1e-05, equal_nan=True
)
data = data.clone().detach().requires_grad_(True)
# gradcheck does not work well with bfloat16 or fp16 cpu types
# there are also small numerical differences with fp32
if dtype not in [torch.half, torch.bfloat16, torch.float]:
# gradcheck does not like "nan" inputs, so replace them with 10
d_non_nan = np.nan_to_num(data_arr, nan=10)
new_data = torch.tensor(
# [10 if v == float("nan") else v for v in data],
d_non_nan,
device=device,
dtype=dtype,
requires_grad=True,
)
self.assertTrue(
gradcheck(
lambda x: torch._segment_reduce(
data=x,
reduce=reduction,
**segment_reduce_kwargs
),
(new_data,),
)
)
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_simple_1d(self, device, dtypes):
val_dtype, length_type = dtypes
lengths = [1, 2, 3, 0]
data = [1, float("nan"), 3, 4, 5, 5]
for reduction in reductions:
for initial in [0, None]:
check_backward = True if initial is not None else False
initial_value = initial
default_value = get_default_value(initial_value, reduction)
if reduction == "max":
expected_result = [1, float("nan"), 5, default_value]
expected_grad = [1, 1, 0, 0, 0.5, 0.5]
elif reduction == "mean":
expected_result = [1, float("nan"), 4.666, default_value]
expected_grad = [1.0, 0.5, 0.5, 0.333, 0.333, 0.333]
elif reduction == "min":
if initial is not None:
initial_value = 1000 # some high number
default_value = get_default_value(initial_value, reduction)
expected_result = [1, float("nan"), 4, default_value]
expected_grad = [1.0, 1.0, 0, 1, 0, 0]
elif reduction == "sum":
expected_result = [1, float("nan"), 14, default_value]
expected_grad = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
elif reduction == "prod":
if initial is not None:
initial_value = 2 # 0 initial_value will zero out everything for prod
default_value = get_default_value(initial_value, reduction)
expected_result = [2, float("nan"), 200, default_value]
expected_grad = [2.0, 6.0, float("nan"), 50.0, 40.0, 40.0]
else:
expected_result = [1, float("nan"), 100, default_value]
expected_grad = [1.0, 3.0, float("nan"), 25.0, 20.0, 20.0]
for axis in [0, -1]:
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
length_type,
)
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_multi_d_simple(self, device, dtypes):
val_dtype, length_type = dtypes
axis = 0
lengths = [1, 2, 3, 0]
data = [[1, 1], [float("nan"), 1], [3, float("nan")], [4, 1], [3, 2], [2, 3]]
for reduction in reductions:
for initial in [0, None]:
check_backward = True if initial is not None else False
initial_value = initial
default_value = get_default_value(initial_value, reduction)
if reduction == "amax":
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[4, 3],
[default_value, default_value],
]
expected_grad = [
[1, 1],
[1, 0],
[0, 1],
[1, 0],
[0, 0],
[0, 1],
]
elif reduction == "mean":
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[3, 2],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[0.5, 0.5],
[0.5, 0.5],
[0.333, 0.333],
[0.333, 0.333],
[0.333, 0.333],
]
elif reduction == "amin":
if initial is not None:
initial_value = 1000 # some high number
default_value = get_default_value(initial_value, reduction)
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[2, 1],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[1, 0],
[0, 1],
[0, 1],
[0, 0],
[1, 0],
]
elif reduction == "sum":
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[9, 6],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
]
elif reduction == "prod":
if initial is not None:
initial_value = 2 # 0 initial_value will zero out everything for prod
default_value = get_default_value(initial_value, reduction)
expected_result = [
[2, 2],
[float("nan"), float("nan")],
[48, 12],
[default_value, default_value],
]
expected_grad = [
[2.0, 2.0],
[6.0, float("nan")],
[float("nan"), 2.0],
[12.0, 12.0],
[16.0, 6.0],
[24.0, 4.0],
]
else:
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[24, 6],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[3.0, float("nan")],
[float("nan"), 1.0],
[6.0, 6.0],
[8.0, 3.0],
[12.0, 2.0],
]
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
)
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
@parametrize("reduce", ['sum', 'prod', 'amin', 'amax', 'mean'])
def test_pytorch_scatter_test_cases(self, device, dtypes, reduce):
val_dtype, length_dtype = dtypes
# zero-length segments are filled with the reduction identities, unlike in pytorch_scatter.
tests = [
{
'src': [1, 2, 3, 4, 5, 6],
'index': [0, 0, 1, 1, 1, 3],
'indptr': [0, 2, 5, 5, 6],
'sum': [3, 12, 0, 6],
'prod': [2, 60, 1, 6],
'mean': [1.5, 4, float('nan'), 6],
'amin': [1, 3, float('inf'), 6],
'amax': [2, 5, -float('inf'), 6],
},
{
'src': [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]],
'index': [0, 0, 1, 1, 1, 3],
'indptr': [0, 2, 5, 5, 6],
'sum': [[4, 6], [21, 24], [0, 0], [11, 12]],
'prod': [[3, 8], [315, 480], [1, 1], [11, 12]],
'mean': [[2, 3], [7, 8], [float('nan'), float('nan')], [11, 12]],
'amin': [[1, 2], [5, 6], [float('inf'), float('inf')], [11, 12]],
'amax': [[3, 4], [9, 10], [-float('inf'), -float('inf')], [11, 12]],
},
{
'src': [[1, 3, 5, 7, 9, 11], [2, 4, 6, 8, 10, 12]],
'index': [[0, 0, 1, 1, 1, 3], [0, 0, 0, 1, 1, 2]],
'indptr': [[0, 2, 5, 5, 6], [0, 3, 5, 6, 6]],
'sum': [[4, 21, 0, 11], [12, 18, 12, 0]],
'prod': [[3, 315, 1, 11], [48, 80, 12, 1]],
'mean': [[2, 7, float('nan'), 11], [4, 9, 12, float('nan')]],
'amin': [[1, 5, float('inf'), 11], [2, 8, 12, float('inf')]],
'amax': [[3, 9, -float('inf'), 11], [6, 10, 12, -float('inf')]],
},
{
'src': [[[1, 2], [3, 4], [5, 6]], [[7, 9], [10, 11], [12, 13]]],
'index': [[0, 0, 1], [0, 2, 2]],
'indptr': [[0, 2, 3, 3], [0, 1, 1, 3]],
'sum': [[[4, 6], [5, 6], [0, 0]], [[7, 9], [0, 0], [22, 24]]],
'prod': [[[3, 8], [5, 6], [1, 1]], [[7, 9], [1, 1], [120, 143]]],
'mean': [[[2, 3], [5, 6], [float('nan'), float('nan')]],
[[7, 9], [float('nan'), float('nan')], [11, 12]]],
'amin': [[[1, 2], [5, 6], [float('inf'), float('inf')]],
[[7, 9], [float('inf'), float('inf')], [10, 11]]],
'amax': [[[3, 4], [5, 6], [-float('inf'), -float('inf')]],
[[7, 9], [-float('inf'), -float('inf')], [12, 13]]],
},
{
'src': [[1, 3], [2, 4]],
'index': [[0, 0], [0, 0]],
'indptr': [[0, 2], [0, 2]],
'sum': [[4], [6]],
'prod': [[3], [8]],
'mean': [[2], [3]],
'amin': [[1], [2]],
'amax': [[3], [4]],
},
{
'src': [[[1, 1], [3, 3]], [[2, 2], [4, 4]]],
'index': [[0, 0], [0, 0]],
'indptr': [[0, 2], [0, 2]],
'sum': [[[4, 4]], [[6, 6]]],
'prod': [[[3, 3]], [[8, 8]]],
'mean': [[[2, 2]], [[3, 3]]],
'amin': [[[1, 1]], [[2, 2]]],
'amax': [[[3, 3]], [[4, 4]]],
},
]
for test in tests:
data = torch.tensor(test['src'], dtype=val_dtype, device=device, requires_grad=True)
indptr = torch.tensor(test['indptr'], dtype=length_dtype, device=device)
dim = indptr.ndim - 1
# calculate lengths from indptr
lengths = torch.diff(indptr, dim=dim)
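# e.g. indptr [0, 2, 5, 5, 6] -> lengths [2, 3, 0, 1]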
expected = torch.tensor(test[reduce], dtype=val_dtype, device=device)
actual_result = torch._segment_reduce(
data=data,
reduce=reduce,
lengths=lengths,
axis=dim,
unsafe=True,
)
self.assertEqual(actual_result, expected)
# test offsets
actual_result = torch._segment_reduce(
data=data,
reduce=reduce,
offsets=indptr,
axis=dim,
unsafe=True,
)
self.assertEqual(actual_result, expected)
if val_dtype == torch.float64:
def fn(x, mode='lengths'):
initial = 1
# supply initial values to prevent gradcheck from failing for 0 length segments
# where nan/inf are reduction identities that produce nans when calculating the numerical jacobian
if reduce == 'amin':
initial = 1000
elif reduce == 'amax':
initial = -1000
segment_reduce_args = (x, reduce)  # a tuple, not a set: positional order (data, reduce) matters when unpacked below
segment_reduce_kwargs = dict(axis=dim, unsafe=True, initial=initial)
if mode == 'lengths':
segment_reduce_kwargs[mode] = lengths
elif mode == 'offsets':
segment_reduce_kwargs[mode] = indptr
return torch._segment_reduce(*segment_reduce_args, **segment_reduce_kwargs)
self.assertTrue(gradcheck(partial(fn, mode='lengths'), (data.clone().detach().requires_grad_(True))))
self.assertTrue(gradcheck(partial(fn, mode='offsets'), (data.clone().detach().requires_grad_(True))))
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_multi_d(self, device, dtypes):
val_dtype, length_type = dtypes
axis = 0
lengths = [0, 2, 3, 0]
data = np.arange(50).reshape(5, 2, 5).tolist()
expected_grad = []
# TODO: calculate grad and check correctness
check_backward = False
for reduction in reductions:
initial_value = 0
if reduction == "amax":
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.max(data[:2], axis=0).tolist(),
np.max(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "mean":
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.mean(data[:2], axis=0).tolist(),
np.mean(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "amin":
initial_value = 1000 # some high number
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.min(data[:2], axis=0).tolist(),
np.min(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "sum":
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.sum(data[:2], axis=0).tolist(),
np.sum(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "prod":
initial_value = 1
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.prod(data[:2], axis=0).tolist(),
np.prod(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
)
@dtypes(torch.int, torch.int64)
def test_unsafe_flag(self, device, dtype):
length_type = dtype
lengths = torch.tensor([0, 2, 3, 0], device=device, dtype=length_type)
data = torch.arange(6, dtype=torch.float, device=device)
# test for error on 1-D lengths
with self.assertRaisesRegex(RuntimeError, "Expected all rows of lengths along axis"):
torch._segment_reduce(data, 'sum', lengths=lengths, axis=0, unsafe=False)
# test for error on multi-D lengths
nd_lengths = torch.tensor([[0, 3, 3, 0], [2, 3, 0, 0]], dtype=length_type, device=device)
nd_data = torch.arange(12, dtype=torch.float, device=device).reshape(2, 6)
with self.assertRaisesRegex(RuntimeError, "Expected all rows of lengths along axis"):
torch._segment_reduce(nd_data, 'sum', lengths=nd_lengths, axis=1, unsafe=False)
instantiate_device_type_tests(TestSegmentReductions, globals())
if __name__ == "__main__":
run_tests()
|
def get_default_value(initial_value, reduction):
if initial_value is not None:
return initial_value
if reduction == "max":
return -float("Inf")
elif reduction == "mean":
return float("nan")
elif reduction == "min":
return float("Inf")
elif reduction == "sum":
return 0.0
elif reduction == "prod":
return 1.0
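# Illustrative examples (not part of the recorded change):
#   get_default_value(None, "sum") -> 0.0
#   get_default_value(None, "prod") -> 1.0
#   get_default_value(5, "max") -> 5   # an explicit initial is passed through unchanged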
class TestSegmentReductions(TestCase):
def _test_common(
self,
reduction,
device,
dtype,
unsafe,
axis,
initial_value,
data_arr,
lengths_arr,
expected_arr,
expected_grad_arr,
check_backward,
lengths_dtype=torch.int,
):
lengths = torch.tensor(lengths_arr, device=device, dtype=lengths_dtype)
# generate offsets from lengths
zeros_shape = list(lengths.shape)
zeros_shape[-1] = 1
offsets = torch.cat((lengths.new_zeros(zeros_shape), lengths), -1).cumsum_(-1)
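# e.g. 1-D lengths [1, 2, 3, 0] -> offsets [0, 1, 3, 6, 6]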
data = torch.tensor(
data_arr,
device=device,
dtype=dtype,
requires_grad=True,
)
expected_result = torch.tensor(expected_arr, device=device, dtype=dtype)
expected_grad = torch.tensor(expected_grad_arr, device=device, dtype=dtype)
for mode in ['lengths', 'offsets']:
segment_reduce_kwargs = dict(
axis=axis,
unsafe=unsafe,
initial=initial_value)
if (mode == 'lengths'):
segment_reduce_kwargs['lengths'] = lengths
else:
segment_reduce_kwargs['offsets'] = offsets
actual_result = torch._segment_reduce(
data=data,
reduce=reduction,
**segment_reduce_kwargs
)
self.assertEqual(
expected_result, actual_result, rtol=1e-02, atol=1e-05, equal_nan=True
)
if not check_backward:
return
# Test backward
actual_result.sum().backward()
self.assertEqual(
expected_grad, data.grad, rtol=1e-02, atol=1e-05, equal_nan=True
)
data = data.clone().detach().requires_grad_(True)
# gradcheck does not work well with bfloat16 or fp16 cpu types
# there are also small numerical differences with fp32
if dtype not in [torch.half, torch.bfloat16, torch.float]:
# gradcheck does not like "nan" inputs, so replace them with 10
d_non_nan = np.nan_to_num(data_arr, nan=10)
new_data = torch.tensor(
# [10 if v == float("nan") else v for v in data],
d_non_nan,
device=device,
dtype=dtype,
requires_grad=True,
)
self.assertTrue(
gradcheck(
lambda x: torch._segment_reduce(
data=x,
reduce=reduction,
**segment_reduce_kwargs
),
(new_data,),
)
)
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_simple_1d(self, device, dtypes):
val_dtype, length_type = dtypes
lengths = [1, 2, 3, 0]
data = [1, float("nan"), 3, 4, 5, 5]
for reduction in reductions:
for initial in [0, None]:
check_backward = True if initial is not None else False
initial_value = initial
default_value = get_default_value(initial_value, reduction)
if reduction == "max":
expected_result = [1, float("nan"), 5, default_value]
expected_grad = [1, 1, 0, 0, 0.5, 0.5]
elif reduction == "mean":
expected_result = [1, float("nan"), 4.666, default_value]
expected_grad = [1.0, 0.5, 0.5, 0.333, 0.333, 0.333]
elif reduction == "min":
if initial is not None:
initial_value = 1000 # some high number
default_value = get_default_value(initial_value, reduction)
expected_result = [1, float("nan"), 4, default_value]
expected_grad = [1.0, 1.0, 0, 1, 0, 0]
elif reduction == "sum":
expected_result = [1, float("nan"), 14, default_value]
expected_grad = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
elif reduction == "prod":
if initial is not None:
initial_value = 2 # 0 initial_value will zero out everything for prod
default_value = get_default_value(initial_value, reduction)
expected_result = [2, float("nan"), 200, default_value]
expected_grad = [2.0, 6.0, float("nan"), 50.0, 40.0, 40.0]
else:
expected_result = [1, float("nan"), 100, default_value]
expected_grad = [1.0, 3.0, float("nan"), 25.0, 20.0, 20.0]
for axis in [0, -1]:
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
length_type,
)
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_simple_zero_length(self, device, dtypes):
val_dtype, length_type = dtypes
lengths = [0, 0]
data = torch.ones(0)
for reduction in reductions:
for initial in [0, None]:
check_backward = True if initial is not None else False
initial_value = initial
default_value = get_default_value(initial_value, reduction)
if reduction == "max":
expected_result = [default_value, default_value]
expected_grad = []
elif reduction == "mean":
expected_result = [default_value, default_value]
expected_grad = []
elif reduction == "min":
if initial is not None:
initial_value = 1000 # some high number
default_value = get_default_value(initial_value, reduction)
expected_result = [default_value, default_value]
expected_grad = []
elif reduction == "sum":
expected_result = [default_value, default_value]
expected_grad = []
elif reduction == "prod":
if initial is not None:
initial_value = 2 # 0 initial_value will zero out everything for prod
default_value = get_default_value(initial_value, reduction)
expected_result = [default_value, default_value]
expected_grad = []
else:
expected_result = [default_value, default_value]
expected_grad = []
for axis in [0]:
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
length_type,
)
@skipIfRocm
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_multi_d_simple(self, device, dtypes):
val_dtype, length_type = dtypes
axis = 0
lengths = [1, 2, 3, 0]
data = [[1, 1], [float("nan"), 1], [3, float("nan")], [4, 1], [3, 2], [2, 3]]
for reduction in reductions:
for initial in [0, None]:
check_backward = True if initial is not None else False
initial_value = initial
default_value = get_default_value(initial_value, reduction)
if reduction == "max":
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[4, 3],
[default_value, default_value],
]
expected_grad = [
[1, 1],
[1, 0],
[0, 1],
[1, 0],
[0, 0],
[0, 1],
]
elif reduction == "mean":
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[3, 2],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[0.5, 0.5],
[0.5, 0.5],
[0.333, 0.333],
[0.333, 0.333],
[0.333, 0.333],
]
elif reduction == "min":
if initial is not None:
initial_value = 1000 # some high number
default_value = get_default_value(initial_value, reduction)
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[2, 1],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[1, 0],
[0, 1],
[0, 1],
[0, 0],
[1, 0],
]
elif reduction == "sum":
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[9, 6],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
]
elif reduction == "prod":
if initial is not None:
initial_value = 2 # 0 initial_value will zero out everything for prod
default_value = get_default_value(initial_value, reduction)
expected_result = [
[2, 2],
[float("nan"), float("nan")],
[48, 12],
[default_value, default_value],
]
expected_grad = [
[2.0, 2.0],
[6.0, float("nan")],
[float("nan"), 2.0],
[12.0, 12.0],
[16.0, 6.0],
[24.0, 4.0],
]
else:
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[24, 6],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[3.0, float("nan")],
[float("nan"), 1.0],
[6.0, 6.0],
[8.0, 3.0],
[12.0, 2.0],
]
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
)
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
@parametrize("reduce", ['sum', 'prod', 'min', 'max', 'mean'])
def test_pytorch_scatter_test_cases(self, device, dtypes, reduce):
val_dtype, length_dtype = dtypes
# zero-length segments are filled with the reduction identities, unlike in pytorch_scatter.
tests = [
{
'src': [1, 2, 3, 4, 5, 6],
'index': [0, 0, 1, 1, 1, 3],
'indptr': [0, 2, 5, 5, 6],
'sum': [3, 12, 0, 6],
'prod': [2, 60, 1, 6],
'mean': [1.5, 4, float('nan'), 6],
'min': [1, 3, float('inf'), 6],
'max': [2, 5, -float('inf'), 6],
},
{
'src': [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]],
'index': [0, 0, 1, 1, 1, 3],
'indptr': [0, 2, 5, 5, 6],
'sum': [[4, 6], [21, 24], [0, 0], [11, 12]],
'prod': [[3, 8], [315, 480], [1, 1], [11, 12]],
'mean': [[2, 3], [7, 8], [float('nan'), float('nan')], [11, 12]],
'min': [[1, 2], [5, 6], [float('inf'), float('inf')], [11, 12]],
'max': [[3, 4], [9, 10], [-float('inf'), -float('inf')], [11, 12]],
},
{
'src': [[1, 3, 5, 7, 9, 11], [2, 4, 6, 8, 10, 12]],
'index': [[0, 0, 1, 1, 1, 3], [0, 0, 0, 1, 1, 2]],
'indptr': [[0, 2, 5, 5, 6], [0, 3, 5, 6, 6]],
'sum': [[4, 21, 0, 11], [12, 18, 12, 0]],
'prod': [[3, 315, 1, 11], [48, 80, 12, 1]],
'mean': [[2, 7, float('nan'), 11], [4, 9, 12, float('nan')]],
'min': [[1, 5, float('inf'), 11], [2, 8, 12, float('inf')]],
'max': [[3, 9, -float('inf'), 11], [6, 10, 12, -float('inf')]],
},
{
'src': [[[1, 2], [3, 4], [5, 6]], [[7, 9], [10, 11], [12, 13]]],
'index': [[0, 0, 1], [0, 2, 2]],
'indptr': [[0, 2, 3, 3], [0, 1, 1, 3]],
'sum': [[[4, 6], [5, 6], [0, 0]], [[7, 9], [0, 0], [22, 24]]],
'prod': [[[3, 8], [5, 6], [1, 1]], [[7, 9], [1, 1], [120, 143]]],
'mean': [[[2, 3], [5, 6], [float('nan'), float('nan')]],
[[7, 9], [float('nan'), float('nan')], [11, 12]]],
'min': [[[1, 2], [5, 6], [float('inf'), float('inf')]],
[[7, 9], [float('inf'), float('inf')], [10, 11]]],
'max': [[[3, 4], [5, 6], [-float('inf'), -float('inf')]],
[[7, 9], [-float('inf'), -float('inf')], [12, 13]]],
},
{
'src': [[1, 3], [2, 4]],
'index': [[0, 0], [0, 0]],
'indptr': [[0, 2], [0, 2]],
'sum': [[4], [6]],
'prod': [[3], [8]],
'mean': [[2], [3]],
'min': [[1], [2]],
'max': [[3], [4]],
},
{
'src': [[[1, 1], [3, 3]], [[2, 2], [4, 4]]],
'index': [[0, 0], [0, 0]],
'indptr': [[0, 2], [0, 2]],
'sum': [[[4, 4]], [[6, 6]]],
'prod': [[[3, 3]], [[8, 8]]],
'mean': [[[2, 2]], [[3, 3]]],
'min': [[[1, 1]], [[2, 2]]],
'max': [[[3, 3]], [[4, 4]]],
},
]
for test in tests:
data = torch.tensor(test['src'], dtype=val_dtype, device=device, requires_grad=True)
indptr = torch.tensor(test['indptr'], dtype=length_dtype, device=device)
dim = indptr.ndim - 1
# calculate lengths from indptr
lengths = torch.diff(indptr, dim=dim)
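# e.g. indptr [0, 2, 5, 5, 6] -> lengths [2, 3, 0, 1]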
expected = torch.tensor(test[reduce], dtype=val_dtype, device=device)
actual_result = torch._segment_reduce(
data=data,
reduce=reduce,
lengths=lengths,
axis=dim,
unsafe=True,
)
self.assertEqual(actual_result, expected)
# test offsets
actual_result = torch._segment_reduce(
data=data,
reduce=reduce,
offsets=indptr,
axis=dim,
unsafe=True,
)
self.assertEqual(actual_result, expected)
if val_dtype == torch.float64:
def fn(x, mode='lengths'):
initial = 1
# supply initial values to prevent gradcheck from failing for 0 length segments
# where nan/inf are reduction identities that produce nans when calculating the numerical jacobian
if reduce == 'min':
initial = 1000
elif reduce == 'max':
initial = -1000
segment_reduce_args = (x, reduce)  # a tuple, not a set: positional order (data, reduce) matters when unpacked below
segment_reduce_kwargs = dict(axis=dim, unsafe=True, initial=initial)
if mode == 'lengths':
segment_reduce_kwargs[mode] = lengths
elif mode == 'offsets':
segment_reduce_kwargs[mode] = indptr
return torch._segment_reduce(*segment_reduce_args, **segment_reduce_kwargs)
self.assertTrue(gradcheck(partial(fn, mode='lengths'), (data.clone().detach().requires_grad_(True))))
self.assertTrue(gradcheck(partial(fn, mode='offsets'), (data.clone().detach().requires_grad_(True))))
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_multi_d(self, device, dtypes):
val_dtype, length_type = dtypes
axis = 0
lengths = [0, 2, 3, 0]
data = np.arange(50).reshape(5, 2, 5).tolist()
expected_grad = []
# TODO: calculate grad and check correctness
check_backward = False
for reduction in reductions:
initial_value = 0
if reduction == "max":
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.max(data[:2], axis=0).tolist(),
np.max(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "mean":
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.mean(data[:2], axis=0).tolist(),
np.mean(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "min":
initial_value = 1000 # some high number
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.min(data[:2], axis=0).tolist(),
np.min(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "sum":
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.sum(data[:2], axis=0).tolist(),
np.sum(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "prod":
initial_value = 1
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.prod(data[:2], axis=0).tolist(),
np.prod(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
)
@dtypes(torch.int, torch.int64)
def test_unsafe_flag(self, device, dtype):
length_type = dtype
lengths = torch.tensor([0, 2, 3, 0], device=device, dtype=length_type)
data = torch.arange(6, dtype=torch.float, device=device)
# test for error on 1-D lengths
with self.assertRaisesRegex(RuntimeError, "Expected all rows of lengths along axis"):
torch._segment_reduce(data, 'sum', lengths=lengths, axis=0, unsafe=False)
# test for error on multi-D lengths
nd_lengths = torch.tensor([[0, 3, 3, 0], [2, 3, 0, 0]], dtype=length_type, device=device)
nd_data = torch.arange(12, dtype=torch.float, device=device).reshape(2, 6)
with self.assertRaisesRegex(RuntimeError, "Expected all rows of lengths along axis"):
torch._segment_reduce(nd_data, 'sum', lengths=nd_lengths, axis=1, unsafe=False)
instantiate_device_type_tests(TestSegmentReductions, globals())
if __name__ == "__main__":
run_tests()
|
from itertools import product
from functools import partial
import numpy as np
import torch
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
gradcheck,
parametrize,
)
reductions = ["max", "mean", "min", "sum", "prod"]
|
from itertools import product
from functools import partial
import numpy as np
import torch
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
gradcheck,
parametrize,
skipIfRocm,
)
reductions = ["max", "mean", "min", "sum", "prod"]
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_segment_reductions.py
|
fn
|
def fn(x, mode='lengths'):
initial = 1
# supply initial values to prevent gradcheck from failing for 0 length segments
# where nan/inf are reduction identities that produce nans when calculating the numerical jacobian
if reduce == 'amin':
initial = 1000
elif reduce == 'amax':
initial = -1000
segment_reduce_args = (x, reduce)  # a tuple, not a set: positional order (data, reduce) matters when unpacked below
segment_reduce_kwargs = dict(axis=dim, unsafe=True, initial=initial)
if mode == 'lengths':
segment_reduce_kwargs[mode] = lengths
elif mode == 'offsets':
segment_reduce_kwargs[mode] = indptr
return torch._segment_reduce(*segment_reduce_args, **segment_reduce_kwargs)
self.assertTrue(gradcheck(partial(fn, mode='lengths'), (data.clone().detach().requires_grad_(True))))
self.assertTrue(gradcheck(partial(fn, mode='offsets'), (data.clone().detach().requires_grad_(True))))
|
def fn(x, mode='lengths'):
initial = 1
# supply initial values to prevent gradcheck from failing for 0 length segments
# where nan/inf are reduction identities that produce nans when calculating the numerical jacobian
if reduce == 'min':
initial = 1000
elif reduce == 'max':
initial = -1000
segment_reduce_args = (x, reduce)  # a tuple, not a set: positional order (data, reduce) matters when unpacked below
segment_reduce_kwargs = dict(axis=dim, unsafe=True, initial=initial)
if mode == 'lengths':
segment_reduce_kwargs[mode] = lengths
elif mode == 'offsets':
segment_reduce_kwargs[mode] = indptr
return torch._segment_reduce(*segment_reduce_args, **segment_reduce_kwargs)
self.assertTrue(gradcheck(partial(fn, mode='lengths'), (data.clone().detach().requires_grad_(True))))
self.assertTrue(gradcheck(partial(fn, mode='offsets'), (data.clone().detach().requires_grad_(True))))
|
from itertools import product
from functools import partial
import numpy as np
import torch
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
gradcheck,
parametrize,
)
reductions = ["max", "mean", "min", "sum", "prod"]
|
from itertools import product
from functools import partial
import numpy as np
import torch
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
gradcheck,
parametrize,
skipIfRocm,
)
reductions = ["max", "mean", "min", "sum", "prod"]
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
test_serialization_dill
|
def test_serialization_dill(self):
x = torch.randn(5, 5)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f, pickle_module=dill)
f.seek(0)
x2 = torch.load(f, pickle_module=dill, encoding='utf-8')
self.assertIsInstance(x2, type(x))
self.assertEqual(x, x2)
f.seek(0)
x3 = torch.load(f, pickle_module=dill)
self.assertIsInstance(x3, type(x))
self.assertEqual(x, x3)
|
def test_serialization_dill(self):
x = torch.randn(5, 5)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f, pickle_module=dill)
f.seek(0)
# weights_only=False as True does not support custom pickle_module
x2 = torch.load(f, pickle_module=dill, encoding='utf-8', weights_only=False)
self.assertIsInstance(x2, type(x))
self.assertEqual(x, x2)
f.seek(0)
# weights_only=False as True does not support custom pickle_module
x3 = torch.load(f, pickle_module=dill, weights_only=False)
self.assertIsInstance(x3, type(x))
self.assertEqual(x, x3)
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
test_serialization_sparse_invalid
|
def test_serialization_sparse_invalid(self):
x = torch.zeros(3, 3)
x[1][1] = 1
x = x.to_sparse()
class TensorSerializationSpoofer:
def __init__(self, tensor):
self.tensor = tensor
def __reduce_ex__(self, proto):
invalid_indices = self.tensor._indices().clone()
invalid_indices[0][0] = 3
return (
torch._utils._rebuild_sparse_tensor,
(
self.tensor.layout,
(
invalid_indices,
self.tensor._values(),
self.tensor.size())))
with tempfile.NamedTemporaryFile() as f:
torch.save({"spoofed": TensorSerializationSpoofer(x)}, f)
f.seek(0)
with self.assertRaisesRegex(
RuntimeError,
"size is inconsistent with indices"):
y = torch.load(f)
|
def test_serialization_sparse_invalid(self):
x = torch.zeros(3, 3)
x[1][1] = 1
x = x.to_sparse()
class TensorSerializationSpoofer:
def __init__(self, tensor):
self.tensor = tensor
def __reduce_ex__(self, proto):
invalid_indices = self.tensor._indices().clone()
invalid_indices[0][0] = 3
return (
torch._utils._rebuild_sparse_tensor,
(
self.tensor.layout,
(
invalid_indices,
self.tensor._values(),
self.tensor.size())))
with tempfile.NamedTemporaryFile() as f:
torch.save({"spoofed": TensorSerializationSpoofer(x)}, f)
for weights_only in (False, True):
f.seek(0)
with self.assertRaisesRegex(
RuntimeError,
"size is inconsistent with indices"):
y = torch.load(f, weights_only=weights_only)
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
generate_map_locations
|
def generate_map_locations(device_type):
return [
{'cuda:0': device_type + ':0'},
device_type,
device_type + ':0',
torch.device(device_type),
torch.device(device_type, 0)
]
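# Illustrative expansion (not part of the recorded change):
#   generate_map_locations('cuda') ->
#   [{'cuda:0': 'cuda:0'}, 'cuda', 'cuda:0', torch.device('cuda'), torch.device('cuda', 0)]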
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
load_bytes
|
def load_bytes():
with open(test_file_path, 'rb') as f:
return io.BytesIO(f.read())
fileobject_lambdas = [lambda: test_file_path, load_bytes]
cpu_map_locations = [
map_location,
{'cuda:0': 'cpu'},
'cpu',
torch.device('cpu'),
]
gpu_0_map_locations = [
{'cuda:0': 'cuda:0'},
'cuda',
'cuda:0',
torch.device('cuda'),
torch.device('cuda', 0)
]
gpu_last_map_locations = [
'cuda:{}'.format(torch.cuda.device_count() - 1),
]
|
def load_bytes():
with open(test_file_path, 'rb') as f:
return io.BytesIO(f.read())
fileobject_lambdas = [lambda: test_file_path, load_bytes]
cpu_map_locations = [
map_location,
{'cuda:0': 'cpu'},
'cpu',
torch.device('cpu'),
]
gpu_0_map_locations = generate_map_locations('cuda')
gpu_last_map_locations = [
f'cuda:{torch.cuda.device_count() - 1}',
]
xpu_0_map_locations = generate_map_locations('xpu')
xpu_last_map_locations = [
f'xpu:{torch.xpu.device_count() - 1}',
]
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
check_map_locations
|
def check_map_locations(map_locations, tensor_class, intended_device):
for fileobject_lambda in fileobject_lambdas:
for map_location in map_locations:
tensor = torch.load(fileobject_lambda(), map_location=map_location)
self.assertEqual(tensor.device, intended_device)
self.assertIsInstance(tensor, tensor_class)
self.assertEqual(tensor, tensor_class([[1.0, 2.0], [3.0, 4.0]]))
check_map_locations(cpu_map_locations, torch.FloatTensor, torch.device('cpu'))
if torch.cuda.is_available():
check_map_locations(gpu_0_map_locations, torch.cuda.FloatTensor, torch.device('cuda', 0))
check_map_locations(
gpu_last_map_locations,
torch.cuda.FloatTensor,
torch.device('cuda', torch.cuda.device_count() - 1)
)
|
def check_map_locations(map_locations, dtype, intended_device):
for fileobject_lambda in fileobject_lambdas:
for map_location in map_locations:
tensor = torch.load(fileobject_lambda(), map_location=map_location)
self.assertEqual(tensor.device, intended_device)
self.assertEqual(tensor.dtype, dtype)
self.assertEqual(tensor, torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=dtype, device=intended_device))
check_map_locations(cpu_map_locations, torch.float, torch.device('cpu'))
if torch.cuda.is_available():
check_map_locations(gpu_0_map_locations, torch.float, torch.device('cuda', 0))
check_map_locations(
gpu_last_map_locations,
torch.float,
torch.device('cuda', torch.cuda.device_count() - 1)
)
if torch.xpu.is_available():
check_map_locations(xpu_0_map_locations, torch.float, torch.device('xpu', 0))
check_map_locations(
xpu_last_map_locations,
torch.float,
torch.device('xpu', torch.xpu.device_count() - 1)
)
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
test_load_unicode_error_msg
|
def test_load_unicode_error_msg(self):
# This Pickle contains a Python 2 module with Unicode data and the
# loading should fail if the user explicitly specifies ascii encoding!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
self.assertRaises(UnicodeDecodeError, lambda: torch.load(path, encoding='ascii'))
|
def test_load_unicode_error_msg(self):
# This Pickle contains a Python 2 module with Unicode data and the
# loading should fail if the user explicitly specifies ascii encoding!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
# weights_only=False as this is legacy code that saves the model
self.assertRaises(UnicodeDecodeError, lambda: torch.load(path, encoding='ascii', weights_only=False))
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
test_load_python2_unicode_module
|
def test_load_python2_unicode_module(self):
# This Pickle contains some Unicode data!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
with warnings.catch_warnings(record=True) as w:
self.assertIsNotNone(torch.load(path))
|
def test_load_python2_unicode_module(self):
# This Pickle contains some Unicode data!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
with warnings.catch_warnings(record=True) as w:
# weights_only=False as this is legacy code that saves the model
self.assertIsNotNone(torch.load(path, weights_only=False))
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
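The record above (like the one before it) migrates a legacy torch.load call site to pass weights_only=False explicitly. A minimal standalone sketch of the contrast, assuming a torch build where torch.load accepts weights_only; LegacyModel is a hypothetical stand-in for an arbitrary pickled object, not something from these records:

import io
import pickle
import torch

buf = io.BytesIO()
torch.save({"w": torch.ones(2, 2)}, buf)    # plain tensors and containers
buf.seek(0)
state = torch.load(buf, weights_only=True)  # restricted unpickler, no arbitrary code
assert torch.equal(state["w"], torch.ones(2, 2))

class LegacyModel:  # hypothetical arbitrary object
    pass

buf = io.BytesIO()
torch.save(LegacyModel(), buf)
buf.seek(0)
try:
    torch.load(buf, weights_only=True)         # rejected: not an allowed global
except pickle.UnpicklingError:
    buf.seek(0)
    obj = torch.load(buf, weights_only=False)  # trusts the pickle, as in the records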
torch
|
test/test_serialization.py
|
test_load_error_msg
|
def test_load_error_msg(self):
expected_err_msg = (".*You can only torch.load from a file that is seekable. " +
"Please pre-load the data into a buffer like io.BytesIO and " +
"try to load from it instead.")
resource = FilelikeMock(data=b"data")
delattr(resource, "tell")
delattr(resource, "seek")
with self.assertRaisesRegex(AttributeError, expected_err_msg):
torch.load(resource)
|
def test_load_error_msg(self):
expected_err_msg = (".*You can only torch.load from a file that is seekable. " +
"Please pre-load the data into a buffer like io.BytesIO and " +
"try to load from it instead.")
resource = FilelikeMock(data=b"data")
delattr(resource, "tell")
delattr(resource, "seek")
with self.assertRaisesRegex(AttributeError, expected_err_msg):
# weights_only=False as this is legacy code that saves the model
torch.load(resource, weights_only=False)
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
class SerializationMixin:
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
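The error message in this record spells out the workaround for non-seekable inputs. A short sketch of that recommendation; stream is assumed to be any object with a read() method, such as a socket wrapper or an HTTP response body:

import io
import torch

def load_from_buffered_stream(stream, **load_kwargs):
    # torch.load needs seek()/tell(); buffer the whole payload first,
    # exactly as the error message above recommends.
    return torch.load(io.BytesIO(stream.read()), **load_kwargs)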
torch
|
test/test_serialization.py
|
test_serialization_new_format_old_format_compat
|
def test_serialization_new_format_old_format_compat(self, device):
self._test_serialization_new_format_old_format_compat(device, False)
|
def test_serialization_new_format_old_format_compat(self, device, weights_only):
x = [torch.ones(200, 200, device=device) for i in range(30)]
def test(f_new, f_old):
torch.save(x, f_new, _use_new_zipfile_serialization=True)
f_new.seek(0)
x_new_load = torch.load(f_new, weights_only=weights_only)
self.assertEqual(x, x_new_load)
torch.save(x, f_old, _use_new_zipfile_serialization=False)
f_old.seek(0)
x_old_load = torch.load(f_old, weights_only=weights_only)
self.assertEqual(x_old_load, x_new_load)
with AlwaysWarnTypedStorageRemoval(True), warnings.catch_warnings(record=True) as w:
with tempfile.NamedTemporaryFile() as f_new, tempfile.NamedTemporaryFile() as f_old:
test(f_new, f_old)
self.assertTrue(len(w) == 0, msg=f"Expected no warnings but got {[str(x) for x in w]}")
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
@unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows")
class TestBothSerialization(TestCase):
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
@unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows")
class TestBothSerialization(TestCase):
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
_test_serialization_container
|
def _test_serialization_container(self, unique_key, filecontext_lambda):
tmpmodule_name = 'tmpmodule{}'.format(unique_key)
def import_module(name, filename):
import importlib.util
spec = importlib.util.spec_from_file_location(name, filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[module.__name__] = module
return module
with filecontext_lambda() as checkpoint:
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network1.py')
module = import_module(tmpmodule_name, fname)
torch.save(module.Net(), checkpoint)
# First check that the checkpoint can be loaded without warning about unsafe loads
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
loaded = torch.load(checkpoint)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 0)
# Replace the module with different source
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network2.py')
module = import_module(tmpmodule_name, fname)
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
loaded = torch.load(checkpoint)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 1)
self.assertTrue(w[0].category, 'SourceChangeWarning')
|
def _test_serialization_container(self, unique_key, filecontext_lambda):
tmpmodule_name = f'tmpmodule{unique_key}'
def import_module(name, filename):
import importlib.util
spec = importlib.util.spec_from_file_location(name, filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[module.__name__] = module
return module
with filecontext_lambda() as checkpoint:
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network1.py')
module = import_module(tmpmodule_name, fname)
torch.save(module.Net(), checkpoint)
# First check that the checkpoint can be loaded without warning about unsafe loads
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
# weights_only=False as this is legacy code that saves the model
loaded = torch.load(checkpoint, weights_only=False)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 0)
# Replace the module with different source
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network2.py')
module = import_module(tmpmodule_name, fname)
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
# weights_only=False as this is legacy code that saves the model
loaded = torch.load(checkpoint, weights_only=False)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, SourceChangeWarning)
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
class TestOldSerialization(TestCase, SerializationMixin):
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
class TestOldSerialization(TestCase, SerializationMixin):
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
import_module
|
def import_module(name, filename):
import importlib.util
spec = importlib.util.spec_from_file_location(name, filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[module.__name__] = module
return module
with filecontext_lambda() as checkpoint:
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network1.py')
module = import_module(tmpmodule_name, fname)
torch.save(module.Net(), checkpoint)
# First check that the checkpoint can be loaded without warning about unsafe loads
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
loaded = torch.load(checkpoint)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 0)
# Replace the module with different source
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network2.py')
module = import_module(tmpmodule_name, fname)
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
loaded = torch.load(checkpoint)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 1)
self.assertTrue(w[0].category, 'SourceChangeWarning')
|
def import_module(name, filename):
import importlib.util
spec = importlib.util.spec_from_file_location(name, filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[module.__name__] = module
return module
with filecontext_lambda() as checkpoint:
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network1.py')
module = import_module(tmpmodule_name, fname)
torch.save(module.Net(), checkpoint)
# First check that the checkpoint can be loaded without warning about unsafe loads
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
# weights_only=False as this is legacy code that saves the model
loaded = torch.load(checkpoint, weights_only=False)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 0)
# Replace the module with different source
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network2.py')
module = import_module(tmpmodule_name, fname)
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
# weights_only=False as this is legacy code that saves the model
loaded = torch.load(checkpoint, weights_only=False)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, SourceChangeWarning)
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
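The two records above pickle a full module object and reload it after its defining source file has changed, expecting a SourceChangeWarning. A sketch of detecting that situation outside the test harness; 'checkpoint.pt' is a placeholder path for a module saved with torch.save:

import warnings
import torch
from torch.serialization import SourceChangeWarning

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    model = torch.load("checkpoint.pt", weights_only=False)
source_changed = any(issubclass(w.category, SourceChangeWarning) for w in caught)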
torch
|
test/test_serialization.py
|
test_serialization_zipfile
|
def test_serialization_zipfile(self, weights_only):
data = self._test_serialization_data()
def test(name_or_buffer):
torch.save(data, name_or_buffer)
if hasattr(name_or_buffer, 'seek'):
name_or_buffer.seek(0)
result = torch.load(name_or_buffer, weights_only=weights_only)
self.assertEqual(result, data)
with tempfile.NamedTemporaryFile() as f:
test(f)
with TemporaryFileName() as fname:
test(fname)
test(io.BytesIO())
|
def test_serialization_zipfile(self, weights_only):
data = self._test_serialization_data()
def test(name_or_buffer):
torch.save(data, name_or_buffer)
if hasattr(name_or_buffer, 'seek'):
name_or_buffer.seek(0)
result = torch.load(name_or_buffer, weights_only=weights_only)
self.assertEqual(result, data)
with tempfile.NamedTemporaryFile() as f:
test(f)
with TemporaryFileName() as fname:
test(fname)
if IS_FILESYSTEM_UTF8_ENCODING:
with TemporaryDirectoryName(suffix='\u975eASCII\u30d1\u30b9') as dname:
with TemporaryFileName(dir=dname) as fname:
test(fname)
test(io.BytesIO())
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
test_weights_only_safe_globals_build
|
def test_weights_only_safe_globals_build(self):
counter = 0
def fake_set_state(obj, *args):
nonlocal counter
counter += 1
c = ClassThatUsesBuildInstruction(2)
with BytesIOContext() as f:
torch.save(c, f)
f.seek(0)
with self.assertRaisesRegex(pickle.UnpicklingError,
"GLOBAL __main__.ClassThatUsesBuildInstruction was not an allowed global by default"):
torch.load(f, weights_only=True)
try:
torch.serialization.add_safe_globals([ClassThatUsesBuildInstruction])
# Test dict update path
f.seek(0)
loaded_c = torch.load(f, weights_only=True)
self.assertEqual(loaded_c.num, 2)
self.assertEqual(loaded_c.foo, 'bar')
# Test setstate path
ClassThatUsesBuildInstruction.__setstate__ = fake_set_state
f.seek(0)
loaded_c = torch.load(f, weights_only=True)
self.assertEqual(loaded_c.num, 2)
self.assertEqual(counter, 1)
self.assertFalse(hasattr(loaded_c, 'foo'))
finally:
torch.serialization.clear_safe_globals()
ClassThatUsesBuildInstruction.__setstate__ = None
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
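This record drives add_safe_globals/clear_safe_globals through a try/finally; the safe_globals context manager imported in the same context scopes the allowlist automatically. A sketch under that assumption; Config is a hypothetical user class carried inside a checkpoint:

import io
import torch
from torch.serialization import safe_globals

class Config:  # hypothetical class embedded in a checkpoint
    def __init__(self, lr):
        self.lr = lr

buf = io.BytesIO()
torch.save(Config(0.1), buf)
buf.seek(0)
with safe_globals([Config]):  # allowlist applies only to this load
    cfg = torch.load(buf, weights_only=True)
assert cfg.lr == 0.1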
torch
|
test/test_serialization.py
|
fake_set_state
|
def fake_set_state(obj, *args):
nonlocal counter
counter += 1
c = ClassThatUsesBuildInstruction(2)
with BytesIOContext() as f:
torch.save(c, f)
f.seek(0)
with self.assertRaisesRegex(pickle.UnpicklingError,
"GLOBAL __main__.ClassThatUsesBuildInstruction was not an allowed global by default"):
torch.load(f, weights_only=True)
try:
torch.serialization.add_safe_globals([ClassThatUsesBuildInstruction])
# Test dict update path
f.seek(0)
loaded_c = torch.load(f, weights_only=True)
self.assertEqual(loaded_c.num, 2)
self.assertEqual(loaded_c.foo, 'bar')
# Test setstate path
ClassThatUsesBuildInstruction.__setstate__ = fake_set_state
f.seek(0)
loaded_c = torch.load(f, weights_only=True)
self.assertEqual(loaded_c.num, 2)
self.assertEqual(counter, 1)
self.assertFalse(hasattr(loaded_c, 'foo'))
finally:
torch.serialization.clear_safe_globals()
ClassThatUsesBuildInstruction.__setstate__ = None
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_efficient_zerotensor
|
def test_serialization_efficient_zerotensor(self, weights_only):
# We don't support serializing `ZeroTensor` as it is not public
# facing yet.
# If in future, `ZeroTensor` serialization is supported, this test
# should start failing!
t = torch._efficientzerotensor((4, 5))
def _save_load_check(t):
with BytesIOContext() as f:
torch.save(t, f)
f.seek(0)
# Unsafe load should work
self.assertEqual(torch.load(f, weights_only=weights_only), t)
# NOTE: `torch.save` fails before we hit the TORCH_CHECK in `getTensorMetadata`
# as nullptr storage is disabled.
err_msg = (r'python bindings to nullptr storage \(e.g., from torch.Tensor._make_wrapper_subclass\)'
' are currently unsafe and thus disabled')
with self.assertRaisesRegex(RuntimeError, err_msg):
_save_load_check(t)
|
def test_serialization_efficient_zerotensor(self, weights_only):
# We don't support serializing `ZeroTensor` as it is not public
# facing yet.
# If in future, `ZeroTensor` serialization is supported, this test
# should start failing!
t = torch._efficientzerotensor((4, 5))
def _save_load_check(t):
with BytesIOContext() as f:
torch.save(t, f)
f.seek(0)
# Unsafe load should work
self.assertEqual(torch.load(f, weights_only=weights_only), t)
# NOTE: `torch.save` fails before we hit the TORCH_CHECK in `getTensorMetadata`
# as nullptr storage is disabled.
with self.assertRaisesRegex(RuntimeError, 'ZeroTensor is not serializable'):
_save_load_check(t)
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
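Per the NOTE in this record, torch.save itself rejects a ZeroTensor before any load can happen. A sketch of observing that failure; torch._efficientzerotensor is private API and the exact error message is version-dependent:

import io
import torch

t = torch._efficientzerotensor((4, 5))  # private constructor, per the record above
try:
    torch.save(t, io.BytesIO())
except RuntimeError as e:
    print(e)  # serialization of ZeroTensor is rejected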
torch
|
test/test_serialization.py
|
test_serialization_byteorder_mark
|
def test_serialization_byteorder_mark(self):
lstm = torch.nn.LSTM(3, 3)
inputs = [torch.randn(1, 3) for _ in range(5)]
inputs = torch.cat(inputs).view(len(inputs), 1, -1)
hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3)) # clean out hidden state
databuffer = io.BytesIO()
torch.save(lstm.state_dict(), databuffer)
databuffer.seek(0)
with torch.serialization._open_zipfile_reader(databuffer) as zip_file:
byteordername = 'byteorder'
self.assertTrue(zip_file.has_record(byteordername))
byteorderdata = zip_file.get_record(byteordername)
self.assertTrue(byteorderdata in [b'little', b'big'])
self.assertEqual(byteorderdata.decode(), sys.byteorder)
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
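This record checks the 'byteorder' entry that newer checkpoints embed in the zip container. A sketch using the same private reader API the test relies on, which may change between releases:

import io
import sys
import torch

buf = io.BytesIO()
torch.save(torch.ones(3), buf)
buf.seek(0)
with torch.serialization._open_zipfile_reader(buf) as zf:
    assert zf.has_record('byteorder')  # stores the saving machine's endianness
    assert zf.get_record('byteorder').decode() == sys.byteorder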
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data
|
def test_serialization_load_bom_data(self):
# 1. Generated on LE system using following commands:
#
# import torch
#
# lstm = torch.nn.LSTM(3, 3)
# inputs = [torch.randn(1, 3) for _ in range(5)]
#
# inputs = torch.cat(inputs).view(len(inputs), 1, -1)
# hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3))
#
# torch.save(lstm.state_dict(), "lstm.LE.pt", _disable_byteorder_record=True)
# torch.save(lstm.state_dict(), "lstm.LE.BOM.pt")
#
# print(lstm.state_dict())
#
# 2. After that it is resaved on BE system with following commands:
#
# import torch
#
# lstm = torch.nn.LSTM(3, 3)
# lstm.load_state_dict(torch.load("lstm.LE.BOM.pt"), strict=True)
#
# torch.save(lstm.state_dict(), "lstm.BE.pt", _disable_byteorder_record=True)
# torch.save(lstm.state_dict(), "lstm.BE.BOM.pt")
#
# print(lstm.state_dict())
#
# Following commands and a bit of manual work were used to produce python bytes from resulting files:
#
# file = open('filename', 'rb')
# data = file.read()
# file.close()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM in this context is used as Byte Order Mark.
#
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\r\x00\x15\x00lstm/data.pklFB\x11\x00ZZZZZZZZZZZZZZZZZ\x80\x02'
b'ccollections\nOrderedDict\nq\x00)Rq\x01(X\x0c\x00\x00\x00weight_ih_l0q\x02ctor'
b'ch._utils\n_rebuild_tensor_v2\nq\x03((X\x07\x00\x00\x00storageq\x04ctorch\nFloat'
b'Storage\nq\x05X\x01\x00\x00\x000q\x06X\x03\x00\x00\x00cpuq\x07K$tq\x08QK\x00K\x0c'
b'K\x03\x86q\tK\x03K\x01\x86q\n\x89h\x00)Rq\x0btq\x0cRq\rX\x0c\x00\x00\x00weight_'
b'hh_l0q\x0eh\x03((h\x04h\x05X\x01\x00\x00\x001q\x0fh\x07K$tq\x10QK\x00K\x0cK\x03\x86'
b'q\x11K\x03K\x01\x86q\x12\x89h\x00)Rq\x13tq\x14Rq\x15X\n\x00\x00\x00bias_ih_l0'
b'q\x16h\x03((h\x04h\x05X\x01\x00\x00\x002q\x17h\x07K\x0ctq\x18QK\x00K\x0c\x85q\x19'
b'K\x01\x85q\x1a\x89h\x00)Rq\x1btq\x1cRq\x1dX\n\x00\x00\x00bias_hh_l0q\x1eh\x03(('
b'h\x04h\x05X\x01\x00\x00\x003q\x1fh\x07K\x0ctq QK\x00K\x0c\x85q!K\x01\x85q"\x89h\x00'
b')Rq#tq$Rq%u}q&X\t\x00\x00\x00_metadataq\'h\x00)Rq(X\x00\x00\x00\x00q)}q*X\x07'
b'\x00\x00\x00versionq+K\x01sssb.PK\x07\x08\xab\xf1\xfb\x01\xb8\x01\x00\x00\xb8\x01'
b'\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x0b\x00\x0f\x00lstm/data/0FB\x0b\x00ZZZZZZZZZZZ\nuJ\xbe'
b'X*\xa2\xbe\xc4\xea\x10>\xd4\n\x8d\xbe\x1c\x10\x8a\xbe\xb02\xe4\xbe,\xcb4>\x00'
b'\x17!>H\x9c\xe0\xbe\xd2\x15!\xbe6C\xc6>v\xc5\x89>\xae\x14\x81\xbeZ\xc7\x99>\x90P'
b'\x01?`\xb9\x9a<\xc0 <=\'\xc7\x9e\xbe\xaa\xf4\x02?\x00\xf3\x0e\xbc\xd8\xb7v\xbe\xa0'
b'\xcc\xcd=$/\xaf>\x00\xc4K=0\xb8\xe5\xbe\xb6\xc5U\xbe\xc4i\xf3\xbe\xa45\xdc>\x06'
b'g\x8d>N!\xae>2Fr\xbe0hb\xbd\xf0we\xbd g\xa0<\xb6\xbe\x9e\xbe\x14\xd1\xc2>PK\x07'
b'\x08j\xd9\xb9M\x90\x00\x00\x00\x90\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x007\x00lst'
b'm/data/1FB3\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ|[\xe1>\xa2Yd\xbe'
b'\xa5o\t\xbfz\x1c\x05\xbe \xb1\xdb<\xf0\xcd\xfc>\xa2u\xcb>\x8c\x87{\xbe\x9c\x9b'
b'^>\xacmG>\xae\x17\x93>\x8e\xc5\xf0\xbet\x1c\xfc>\xcb\x84\x81\xbe\xc8\xa6 >\x88\xee'
b'\xaf=\n\xc9\x8d>\xc0\xc5\xee>\xf0E\x91>\xf4^\xa1>\xb8\xbbF>\x97\x97\xfe\xbe\xec'
b'\x85\x03?h\x9c\xf3=\xf2\xa8\x97>^\xfa\r?6i\x94\xbe\xbc1w\xbeh\xc4\x8a=\x94\xc8'
b'\x9f\xbd\x81\xb5\x89\xbe(K\xb0>\xf0:z\xbd\xb0\xc6\x9b\xbdX\x00\x88=\x05\xc7\x11\xbf'
b'PK\x07\x08\x12\xc0\x87\x96\x90\x00\x00\x00\x90\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b'
b'\x007\x00lstm/data/2FB3\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'Z\xb0\xc2f=@\xdd1<\x864\xd8\xbe\xa0\t\x13?+g\x8f\xbeu\xb1\r\xbfbl\xc3>\xa8\\\x82'
b'\xbe\xa4c\xf3\xbd,\x96\xdf\xbe\xfe\x05\xf1\xbe\xf8\xc9\x96>PK\x07\x08\x92\tK?0\x00'
b'\x00\x000\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x17\x00lstm/data/3FB\x13\x00ZZ'
b'ZZZZZZZZZZZZZZZZZ\x04\xaai\xbe\xce\xd8\x8a\xbe\xe3O\xdf\xbe$\xc3\xd2\xbe\x06\xb1'
b'\x80\xbe^&\x08?\x00\x1a}\xbd\x06\xde\r?\x04\xe7\xac>Z@\xe9\xbe\x14\xc2)>\x9c\xe9'
b'/>PK\x07\x08\x1axU\xe80\x00\x00\x000\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00'
b'lstm/versionFB\x12\x00ZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00'
b'\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xab\xf1'
b'\xfb\x01\xb8\x01\x00\x00\xb8\x01\x00\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00lstm/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00j\xd9\xb9M\x90\x00\x00\x00\x90\x00\x00\x00\x0b\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x02\x00\x00lstm/data/0PK\x01\x02\x00'
b'\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x12\xc0\x87\x96\x90\x00\x00\x00\x90'
b'\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x02\x00'
b'\x00lstm/data/1PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x92'
b'\tK?0\x00\x00\x000\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\xe0\x03\x00\x00lstm/data/2PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\x1axU\xe80\x00\x00\x000\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x80\x04\x00\x00lstm/data/3PK\x01\x02\x00\x00\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x0c\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00lstm/versionPK\x06'
b'\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06'
b'\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00Y\x01\x00\x00\x00\x00'
b'\x00\x00R\x05\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xab\x06\x00\x00'
b'\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x06\x00\x06\x00Y\x01'
b'\x00\x00R\x05\x00\x00\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x12\x00\x10\x00lstm.save/data.pklFB\x0c\x00ZZZZZZZZZZZZ\x80'
b'\x02ccollections\nOrderedDict\nq\x00)Rq\x01(X\x0c\x00\x00\x00weight_ih_l0q\x02ct'
b'orch._utils\n_rebuild_tensor_v2\nq\x03((X\x07\x00\x00\x00storageq\x04ctorch\nFlo'
b'atStorage\nq\x05X\x01\x00\x00\x000q\x06X\x03\x00\x00\x00cpuq\x07K$tq\x08QK\x00K\x0c'
b'K\x03\x86q\tK\x03K\x01\x86q\n\x89h\x00)Rq\x0btq\x0cRq\rX\x0c\x00\x00\x00weigh'
b't_hh_l0q\x0eh\x03((h\x04h\x05X\x01\x00\x00\x001q\x0fh\x07K$tq\x10QK\x00K\x0cK\x03'
b'\x86q\x11K\x03K\x01\x86q\x12\x89h\x00)Rq\x13tq\x14Rq\x15X\n\x00\x00\x00bias_ih_'
b'l0q\x16h\x03((h\x04h\x05X\x01\x00\x00\x002q\x17h\x07K\x0ctq\x18QK\x00K\x0c\x85q\x19'
b'K\x01\x85q\x1a\x89h\x00)Rq\x1btq\x1cRq\x1dX\n\x00\x00\x00bias_hh_l0q\x1eh\x03'
b'((h\x04h\x05X\x01\x00\x00\x003q\x1fh\x07K\x0ctq QK\x00K\x0c\x85q!K\x01\x85q"\x89'
b'h\x00)Rq#tq$Rq%u}q&X\t\x00\x00\x00_metadataq\'h\x00)Rq(X\x00\x00\x00\x00q)}q*X\x07'
b'\x00\x00\x00versionq+K\x01sssb.PK\x07\x08\xab\xf1\xfb\x01\xb8\x01\x00\x00\xb8\x01'
b'\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x13\x00\x07\x00lstm.save/byteorderFB\x03\x00ZZZlit'
b'tlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10'
b'\x00<\x00lstm.save/data/0FB8\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZ\nuJ\xbeX*\xa2\xbe\xc4\xea\x10>\xd4\n\x8d\xbe\x1c\x10\x8a\xbe\xb02\xe4\xbe'
b',\xcb4>\x00\x17!>H\x9c\xe0\xbe\xd2\x15!\xbe6C\xc6>v\xc5\x89>\xae\x14\x81\xbeZ\xc7'
b'\x99>\x90P\x01?`\xb9\x9a<\xc0 <=\'\xc7\x9e\xbe\xaa\xf4\x02?\x00\xf3\x0e\xbc\xd8'
b'\xb7v\xbe\xa0\xcc\xcd=$/\xaf>\x00\xc4K=0\xb8\xe5\xbe\xb6\xc5U\xbe\xc4i\xf3\xbe'
b'\xa45\xdc>\x06g\x8d>N!\xae>2Fr\xbe0hb\xbd\xf0we\xbd g\xa0<\xb6\xbe\x9e\xbe\x14\xd1'
b'\xc2>PK\x07\x08j\xd9\xb9M\x90\x00\x00\x00\x90\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10'
b'\x002\x00lstm.save/data/1FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ|'
b'[\xe1>\xa2Yd\xbe\xa5o\t\xbfz\x1c\x05\xbe \xb1\xdb<\xf0\xcd\xfc>\xa2u\xcb>\x8c\x87'
b'{\xbe\x9c\x9b^>\xacmG>\xae\x17\x93>\x8e\xc5\xf0\xbet\x1c\xfc>\xcb\x84\x81\xbe\xc8'
b'\xa6 >\x88\xee\xaf=\n\xc9\x8d>\xc0\xc5\xee>\xf0E\x91>\xf4^\xa1>\xb8\xbbF>\x97\x97'
b'\xfe\xbe\xec\x85\x03?h\x9c\xf3=\xf2\xa8\x97>^\xfa\r?6i\x94\xbe\xbc1w\xbeh\xc4'
b'\x8a=\x94\xc8\x9f\xbd\x81\xb5\x89\xbe(K\xb0>\xf0:z\xbd\xb0\xc6\x9b\xbdX\x00\x88='
b'\x05\xc7\x11\xbfPK\x07\x08\x12\xc0\x87\x96\x90\x00\x00\x00\x90\x00\x00\x00PK\x03'
b'\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x10\x002\x00lstm.save/data/2FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZ\xb0\xc2f=@\xdd1<\x864\xd8\xbe\xa0\t\x13?+g\x8f\xbeu\xb1\r\xbfbl\xc3'
b'>\xa8\\\x82\xbe\xa4c\xf3\xbd,\x96\xdf\xbe\xfe\x05\xf1\xbe\xf8\xc9\x96>PK\x07\x08'
b'\x92\tK?0\x00\x00\x000\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x12\x00lstm.save/'
b'data/3FB\x0e\x00ZZZZZZZZZZZZZZ\x04\xaai\xbe\xce\xd8\x8a\xbe\xe3O\xdf\xbe$\xc3\xd2'
b'\xbe\x06\xb1\x80\xbe^&\x08?\x00\x1a}\xbd\x06\xde\r?\x04\xe7\xac>Z@\xe9\xbe\x14\xc2'
b')>\x9c\xe9/>PK\x07\x08\x1axU\xe80\x00\x00\x000\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11'
b'\x00\x11\x00lstm.save/versionFB\r\x00ZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02'
b'\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\xab\xf1\xfb\x01\xb8\x01\x00\x00\xb8\x01\x00\x00\x12\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00lstm.save/data.pklPK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00'
b'\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x02\x00\x00l'
b'stm.save/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00j\xd9'
b'\xb9M\x90\x00\x00\x00\x90\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00V\x02\x00\x00lstm.save/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\x12\xc0\x87\x96\x90\x00\x00\x00\x90\x00\x00\x00\x10\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x03\x00\x00lstm.save/data/1PK\x01'
b'\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x92\tK?0\x00\x00\x000\x00\x00'
b'\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x04\x00\x00lstm.'
b'save/data/2PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x1axU\xe80'
b'\x00\x00\x000\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x05\x00\x00lstm.save/data/3PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x80\x05\x00\x00lstm.save/versionPK\x06\x06,\x00\x00'
b'\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00'
b'\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x00\x00\x00'
b'\xd2\x05\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\x8a\x07\x00\x00\x00'
b'\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x07\x00\x07\x00\xb8\x01\x00'
b'\x00\xd2\x05\x00\x00\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x12\x00\x10\x00lstm.save/data.pklFB\x0c\x00ZZZZZZZZZZZZ\x80'
b'\x02ccollections\nOrderedDict\nq\x00)Rq\x01(X\x0c\x00\x00\x00weight_ih_l0q\x02ct'
b'orch._utils\n_rebuild_tensor_v2\nq\x03((X\x07\x00\x00\x00storageq\x04ctorch\nFlo'
b'atStorage\nq\x05X\x01\x00\x00\x000q\x06X\x03\x00\x00\x00cpuq\x07K$tq\x08QK\x00K\x0c'
b'K\x03\x86q\tK\x03K\x01\x86q\n\x89h\x00)Rq\x0btq\x0cRq\rX\x0c\x00\x00\x00weigh'
b't_hh_l0q\x0eh\x03((h\x04h\x05X\x01\x00\x00\x001q\x0fh\x07K$tq\x10QK\x00K\x0cK\x03'
b'\x86q\x11K\x03K\x01\x86q\x12\x89h\x00)Rq\x13tq\x14Rq\x15X\n\x00\x00\x00bias_ih_'
b'l0q\x16h\x03((h\x04h\x05X\x01\x00\x00\x002q\x17h\x07K\x0ctq\x18QK\x00K\x0c\x85q\x19'
b'K\x01\x85q\x1a\x89h\x00)Rq\x1btq\x1cRq\x1dX\n\x00\x00\x00bias_hh_l0q\x1eh\x03'
b'((h\x04h\x05X\x01\x00\x00\x003q\x1fh\x07K\x0ctq QK\x00K\x0c\x85q!K\x01\x85q"\x89'
b'h\x00)Rq#tq$Rq%u}q&X\t\x00\x00\x00_metadataq\'h\x00)Rq(X\x00\x00\x00\x00q)}q*X\x07'
b'\x00\x00\x00versionq+K\x01sssb.PK\x07\x08\xab\xf1\xfb\x01\xb8\x01\x00\x00\xb8\x01'
b'\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x10\x00\n\x00lstm.save/data/0FB\x06\x00ZZZZZZ\xbeJ'
b'u\n\xbe\xa2*X>\x10\xea\xc4\xbe\x8d\n\xd4\xbe\x8a\x10\x1c\xbe\xe42\xb0>4\xcb,>!\x17'
b'\x00\xbe\xe0\x9cH\xbe!\x15\xd2>\xc6C6>\x89\xc5v\xbe\x81\x14\xae>\x99\xc7Z?\x01'
b'P\x90<\x9a\xb9`=< \xc0\xbe\x9e\xc7\'?\x02\xf4\xaa\xbc\x0e\xf3\x00\xbev\xb7\xd8=\xcd'
b'\xcc\xa0>\xaf/$=K\xc4\x00\xbe\xe5\xb80\xbeU\xc5\xb6\xbe\xf3i\xc4>\xdc5\xa4>\x8d'
b'g\x06>\xae!N\xberF2\xbdbh0\xbdew\xf0<\xa0g \xbe\x9e\xbe\xb6>\xc2\xd1\x14PK\x07'
b'\x08\xc2yG\xba\x90\x00\x00\x00\x90\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x002\x00lst'
b'm.save/data/1FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ>\xe1[|\xbedY\xa2'
b'\xbf\to\xa5\xbe\x05\x1cz<\xdb\xb1 >\xfc\xcd\xf0>\xcbu\xa2\xbe{\x87\x8c>^\x9b\x9c'
b'>Gm\xac>\x93\x17\xae\xbe\xf0\xc5\x8e>\xfc\x1ct\xbe\x81\x84\xcb> \xa6\xc8=\xaf'
b'\xee\x88>\x8d\xc9\n>\xee\xc5\xc0>\x91E\xf0>\xa1^\xf4>F\xbb\xb8\xbe\xfe\x97\x97?\x03'
b'\x85\xec=\xf3\x9ch>\x97\xa8\xf2?\r\xfa^\xbe\x94i6\xbew1\xbc=\x8a\xc4h\xbd\x9f'
b'\xc8\x94\xbe\x89\xb5\x81>\xb0K(\xbdz:\xf0\xbd\x9b\xc6\xb0=\x88\x00X\xbf\x11\xc7\x05'
b'PK\x07\x08\xd0\xbftD\x90\x00\x00\x00\x90\x00\x00\x00PK\x03\x04\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00'
b'2\x00lstm.save/data/2FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ=f\xc2'
b'\xb0<1\xdd@\xbe\xd84\x86?\x13\t\xa0\xbe\x8fg+\xbf\r\xb1u>\xc3lb\xbe\x82\\\xa8\xbd'
b'\xf3c\xa4\xbe\xdf\x96,\xbe\xf1\x05\xfe>\x96\xc9\xf8PK\x07\x08"\xc5\xc5O0\x00\x00'
b'\x000\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x12\x00lstm.save/data/3FB\x0e\x00Z'
b'ZZZZZZZZZZZZZ\xbei\xaa\x04\xbe\x8a\xd8\xce\xbe\xdfO\xe3\xbe\xd2\xc3$\xbe\x80\xb1'
b'\x06?\x08&^\xbd}\x1a\x00?\r\xde\x06>\xac\xe7\x04\xbe\xe9@Z>)\xc2\x14>/\xe9\x9cPK'
b'\x07\x08\xfb\xfd/\x920\x00\x00\x000\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x11\x00'
b'lstm.save/versionFB\r\x00ZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02'
b'\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xab\xf1'
b'\xfb\x01\xb8\x01\x00\x00\xb8\x01\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00lstm.save/data.pklPK\x01\x02\x00\x00\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\xc2yG\xba\x90\x00\x00\x00\x90\x00\x00\x00\x10\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x02\x00\x00lstm.save/data/0PK'
b'\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd0\xbftD\x90\x00\x00\x00'
b'\x90\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x02'
b'\x00\x00lstm.save/data/1PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00"\xc5\xc5O0\x00\x00\x000\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\xe0\x03\x00\x00lstm.save/data/2PK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\xfb\xfd/\x920\x00\x00\x000\x00\x00\x00\x10\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x04\x00\x00lstm.save/data/3PK\x01\x02'
b'\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02'
b'\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00'
b'\x00lstm.save/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00'
b'\x00\x00w\x01\x00\x00\x00\x00\x00\x00R\x05\x00\x00\x00\x00\x00\x00PK\x06\x07\x00'
b'\x00\x00\x00\xc9\x06\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00'
b'\x00\x00\x06\x00\x06\x00w\x01\x00\x00R\x05\x00\x00\x00\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x12\x00\x10\x00lstm.save/data.pklFB\x0c\x00ZZZZZZZZZZZZ\x80'
b'\x02ccollections\nOrderedDict\nq\x00)Rq\x01(X\x0c\x00\x00\x00weight_ih_l0q\x02ct'
b'orch._utils\n_rebuild_tensor_v2\nq\x03((X\x07\x00\x00\x00storageq\x04ctorch\nFlo'
b'atStorage\nq\x05X\x01\x00\x00\x000q\x06X\x03\x00\x00\x00cpuq\x07K$tq\x08QK\x00K\x0c'
b'K\x03\x86q\tK\x03K\x01\x86q\n\x89h\x00)Rq\x0btq\x0cRq\rX\x0c\x00\x00\x00weigh'
b't_hh_l0q\x0eh\x03((h\x04h\x05X\x01\x00\x00\x001q\x0fh\x07K$tq\x10QK\x00K\x0cK\x03'
b'\x86q\x11K\x03K\x01\x86q\x12\x89h\x00)Rq\x13tq\x14Rq\x15X\n\x00\x00\x00bias_ih_'
b'l0q\x16h\x03((h\x04h\x05X\x01\x00\x00\x002q\x17h\x07K\x0ctq\x18QK\x00K\x0c\x85q\x19'
b'K\x01\x85q\x1a\x89h\x00)Rq\x1btq\x1cRq\x1dX\n\x00\x00\x00bias_hh_l0q\x1eh\x03'
b'((h\x04h\x05X\x01\x00\x00\x003q\x1fh\x07K\x0ctq QK\x00K\x0c\x85q!K\x01\x85q"\x89'
b'h\x00)Rq#tq$Rq%u}q&X\t\x00\x00\x00_metadataq\'h\x00)Rq(X\x00\x00\x00\x00q)}q*X\x07'
b'\x00\x00\x00versionq+K\x01sssb.PK\x07\x08\xab\xf1\xfb\x01\xb8\x01\x00\x00\xb8\x01'
b'\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x13\x00\x07\x00lstm.save/byteorderFB\x03\x00ZZZbig'
b'PK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00'
b'?\x00lstm.save/data/0FB;\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZ\xbeJu\n\xbe\xa2*X>\x10\xea\xc4\xbe\x8d\n\xd4\xbe\x8a\x10\x1c\xbe\xe42\xb0'
b'>4\xcb,>!\x17\x00\xbe\xe0\x9cH\xbe!\x15\xd2>\xc6C6>\x89\xc5v\xbe\x81\x14\xae>\x99'
b'\xc7Z?\x01P\x90<\x9a\xb9`=< \xc0\xbe\x9e\xc7\'?\x02\xf4\xaa\xbc\x0e\xf3\x00\xbe'
b'v\xb7\xd8=\xcd\xcc\xa0>\xaf/$=K\xc4\x00\xbe\xe5\xb80\xbeU\xc5\xb6\xbe\xf3i\xc4'
b'>\xdc5\xa4>\x8dg\x06>\xae!N\xberF2\xbdbh0\xbdew\xf0<\xa0g \xbe\x9e\xbe\xb6>\xc2\xd1'
b'\x14PK\x07\x08\xc2yG\xba\x90\x00\x00\x00\x90\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10'
b'\x002\x00lstm.save/data/1FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ>'
b'\xe1[|\xbedY\xa2\xbf\to\xa5\xbe\x05\x1cz<\xdb\xb1 >\xfc\xcd\xf0>\xcbu\xa2\xbe{\x87'
b'\x8c>^\x9b\x9c>Gm\xac>\x93\x17\xae\xbe\xf0\xc5\x8e>\xfc\x1ct\xbe\x81\x84\xcb> '
b'\xa6\xc8=\xaf\xee\x88>\x8d\xc9\n>\xee\xc5\xc0>\x91E\xf0>\xa1^\xf4>F\xbb\xb8\xbe\xfe'
b'\x97\x97?\x03\x85\xec=\xf3\x9ch>\x97\xa8\xf2?\r\xfa^\xbe\x94i6\xbew1\xbc=\x8a'
b'\xc4h\xbd\x9f\xc8\x94\xbe\x89\xb5\x81>\xb0K(\xbdz:\xf0\xbd\x9b\xc6\xb0=\x88\x00X'
b'\xbf\x11\xc7\x05PK\x07\x08\xd0\xbftD\x90\x00\x00\x00\x90\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x10\x002\x00lstm.save/data/2FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZ=f\xc2\xb0<1\xdd@\xbe\xd84\x86?\x13\t\xa0\xbe\x8fg+\xbf\r\xb1u>\xc3lb\xbe'
b'\x82\\\xa8\xbd\xf3c\xa4\xbe\xdf\x96,\xbe\xf1\x05\xfe>\x96\xc9\xf8PK\x07\x08"\xc5'
b'\xc5O0\x00\x00\x000\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x12\x00lstm.save/data'
b'/3FB\x0e\x00ZZZZZZZZZZZZZZ\xbei\xaa\x04\xbe\x8a\xd8\xce\xbe\xdfO\xe3\xbe\xd2\xc3'
b'$\xbe\x80\xb1\x06?\x08&^\xbd}\x1a\x00?\r\xde\x06>\xac\xe7\x04\xbe\xe9@Z>)\xc2\x14'
b'>/\xe9\x9cPK\x07\x08\xfb\xfd/\x920\x00\x00\x000\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11'
b'\x00\x11\x00lstm.save/versionFB\r\x00ZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00'
b'\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\xab\xf1\xfb\x01\xb8\x01\x00\x00\xb8\x01\x00\x00\x12\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00lstm.save/data.pklPK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00'
b'\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x02\x00\x00ls'
b'tm.save/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xc2y'
b'G\xba\x90\x00\x00\x00\x90\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00S\x02\x00\x00lstm.save/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\xd0\xbftD\x90\x00\x00\x00\x90\x00\x00\x00\x10\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x03\x00\x00lstm.save/data/1PK\x01\x02\x00'
b'\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00"\xc5\xc5O0\x00\x00\x000\x00\x00\x00'
b'\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x04\x00\x00lstm.save/'
b'data/2PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xfb\xfd/\x920\x00'
b'\x00\x000\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x05\x00\x00lstm.save/data/3PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x80\x05\x00\x00lstm.save/versionPK\x06\x06,\x00\x00\x00'
b'\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00'
b'\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x00\x00\x00'
b'\xd2\x05\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\x8a\x07\x00\x00\x00\x00'
b'\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x07\x00\x07\x00\xb8\x01\x00'
b'\x00\xd2\x05\x00\x00\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
lstm_le_no_bom = torch.nn.LSTM(3, 3)
lstm_le_bom = torch.nn.LSTM(3, 3)
lstm_be_no_bom = torch.nn.LSTM(3, 3)
lstm_be_bom = torch.nn.LSTM(3, 3)
lstm_le_no_bom_little = torch.nn.LSTM(3, 3)
lstm_be_no_bom_little = torch.nn.LSTM(3, 3)
lstm_le_no_bom_big = torch.nn.LSTM(3, 3)
lstm_be_no_bom_big = torch.nn.LSTM(3, 3)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
lstm_le_no_bom.load_state_dict(torch.load(buf_le_no_bom), strict=True)
lstm_be_no_bom.load_state_dict(torch.load(buf_be_no_bom), strict=True)
finally:
set_default_load_endianness(current_load_endian)
lstm_le_bom.load_state_dict(torch.load(buf_le_bom), strict=True)
lstm_be_bom.load_state_dict(torch.load(buf_be_bom), strict=True)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
lstm_le_no_bom_little.load_state_dict(torch.load(buf_le_no_bom), strict=True)
lstm_be_no_bom_little.load_state_dict(torch.load(buf_be_no_bom), strict=True)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
lstm_le_no_bom_big.load_state_dict(torch.load(buf_le_no_bom), strict=True)
lstm_be_no_bom_big.load_state_dict(torch.load(buf_be_no_bom), strict=True)
finally:
set_default_load_endianness(current_load_endian)
self.assertEqual(lstm_le_bom.state_dict(), lstm_be_bom.state_dict())
self.assertNotEqual(lstm_le_no_bom.state_dict(), lstm_be_no_bom.state_dict())
self.assertEqual(lstm_le_no_bom_little.state_dict(), lstm_le_bom.state_dict())
self.assertNotEqual(lstm_be_no_bom_little.state_dict(), lstm_be_bom.state_dict())
self.assertNotEqual(lstm_le_no_bom_big.state_dict(), lstm_le_bom.state_dict())
self.assertEqual(lstm_be_no_bom_big.state_dict(), lstm_be_bom.state_dict())
if sys.byteorder == 'little':
self.assertEqual(lstm_le_no_bom.state_dict(), lstm_le_bom.state_dict())
self.assertEqual(lstm_le_no_bom.state_dict(), lstm_be_bom.state_dict())
self.assertNotEqual(lstm_be_no_bom.state_dict(), lstm_le_bom.state_dict())
self.assertNotEqual(lstm_be_no_bom.state_dict(), lstm_be_bom.state_dict())
else:
self.assertNotEqual(lstm_le_no_bom.state_dict(), lstm_le_bom.state_dict())
self.assertNotEqual(lstm_le_no_bom.state_dict(), lstm_be_bom.state_dict())
self.assertEqual(lstm_be_no_bom.state_dict(), lstm_le_bom.state_dict())
self.assertEqual(lstm_be_no_bom.state_dict(), lstm_be_bom.state_dict())
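# --- Not part of the test above: a minimal, hedged sketch of the pattern
# these endianness tests exercise. torch.save writes a byteorder (BOM)
# record by default, and set_default_load_endianness only matters for
# archives saved without one. Assumes the torch.serialization imports
# shown in the surrounding context.
import io

import torch
from torch.serialization import (
    get_default_load_endianness,
    LoadEndianness,
    set_default_load_endianness,
)

buf = io.BytesIO()
torch.save(torch.randn(2, 2), buf)  # includes a byteorder record by default
buf.seek(0)

previous = get_default_load_endianness()
try:
    # Only affects archives without a byteorder record (the *_no_bom cases).
    set_default_load_endianness(LoadEndianness.BIG)
    t = torch.load(buf)
finally:
    set_default_load_endianness(previous)  # restore so later loads are unaffected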
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
_test_save_load_attr
|
def _test_save_load_attr(t):
t.foo = 'foo'
t.pi = 3.14
with BytesIOContext() as f:
torch.save(t, f)
f.seek(0)
loaded_t = torch.load(f)
self.assertEqual(t, loaded_t)
self.assertEqual(t.foo, loaded_t.foo)
self.assertEqual(t.pi, loaded_t.pi)
t = torch.zeros(3, 3)
_test_save_load_attr(t)
_test_save_load_attr(torch.nn.Parameter(t))
|
def _test_save_load_attr(t):
t.foo = 'foo'
t.pi = 3.14
with BytesIOContext() as f:
torch.save(t, f)
f.seek(0)
loaded_t = torch.load(f, weights_only=weights_only)
self.assertEqual(t, loaded_t)
self.assertEqual(t.foo, loaded_t.foo)
self.assertEqual(t.pi, loaded_t.pi)
t = torch.zeros(3, 3)
_test_save_load_attr(t)
_test_save_load_attr(torch.nn.Parameter(t))
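# --- Hedged sketch (not part of the dataset record): the `weights_only` name
# above is supplied by the enclosing, elided parametrized test; a standalone
# call looks like this.
import io

import torch

t = torch.zeros(3, 3)
buf = io.BytesIO()
torch.save(t, buf)
buf.seek(0)
# weights_only=True restricts unpickling to an allowlist of safe types.
loaded = torch.load(buf, weights_only=True)
assert torch.equal(t, loaded)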
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
import importlib.util
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
test_weights_only_safe_globals_newobj
|
torch.load(f, weights_only=True)
|
def test_weights_only_safe_globals_newobj(self):
# This will use NEWOBJ
p = Point(x=1, y=2)
with BytesIOContext() as f:
torch.save(p, f)
f.seek(0)
with self.assertRaisesRegex(pickle.UnpicklingError,
"GLOBAL __main__.Point was not an allowed global by default"):
torch.load(f, weights_only=True)
f.seek(0)
try:
torch.serialization.add_safe_globals([Point])
loaded_p = torch.load(f, weights_only=True)
self.assertEqual(loaded_p, p)
finally:
torch.serialization.clear_safe_globals()
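# --- Sketch of the same allowlisting via the safe_globals context manager
# (imported in the surrounding context); it restores the previous allowlist
# on exit, so no clear_safe_globals() call is needed.
import io

import torch
from torch.serialization import safe_globals

buf = io.BytesIO()
torch.save(Point(x=1, y=2), buf)  # Point is the module-level namedtuple above
buf.seek(0)
with safe_globals([Point]):
    loaded = torch.load(buf, weights_only=True)
assert loaded == Point(x=1, y=2)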
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_double
|
def test_serialization_load_bom_data_double(self):
# 1. Generated on an LE system using the following commands:
#
# import torch
#
# x = torch.randn(2,2, dtype=torch.double)
#
# torch.save(x, "tensor.double.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.double.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it is resaved on a BE system with the following commands:
#
# import torch
#
# x = torch.load('tensor.double.LE.BOM.pt')
#
# torch.save(x, 'tensor.double.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.double.BE.BOM.pt')
#
# print(x)
#
# The following commands and a bit of manual work were used to produce Python bytes from the resulting files:
#
# file = open('filename', 'rb')
# data = file.read()
# file.close()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM in this context stands for Byte Order Mark.
#
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x19\x00\t\x00tensor.double.LE/data.pklFB\x05\x00ZZZZZ\x80\x02'
b'ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\n'
b'DoubleStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08S\xd3\xba&\x9b\x00\x00\x00\x9b\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x17\x00 \x00tensor.double.LE/data/0FB\x1c\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'\x97v\xa4\xff|^\xc9?\xce\xbc\x8cP\x8d\xb0\xe9\xbf\xdc\x0e\xef[\xb7\xdb\xd3\xbf4\xb1'
b'\x08Q\xf9\x00\xde?PK\x07\x08\xae\x92t\x0f \x00\x00\x00 \x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x18\x00\x1a\x00tensor.double.LE/versionFB\x16\x00ZZZZZZZZZZZZZZZZZZZZZZ'
b'3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00S\xd3\xba&\x9b\x00\x00\x00\x9b\x00\x00\x00\x19\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.double'
b'.LE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xae\x92t\x0f'
b' \x00\x00\x00 \x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\xeb\x00\x00\x00tensor.double.LE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x18\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\x01\x00\x00tensor.double.LE/versionPK\x06'
b'\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xd2\x00\x00\x00'
b'\x00\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xa4\x02'
b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03'
b'\x00\xd2\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1d\x00\x05\x00tensor.double.LE.BOM/data.pklFB\x01\x00Z\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nDoubleStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08S\xd3\xba&\x9b\x00\x00\x00\x9b\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1e\x00\x19\x00tensor.double.LE.BOM/byteorderFB\x15\x00ZZZZZZZZZZZZZZZZ'
b'ZZZZZlittlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1b\x001\x00tensor.double.LE.BOM/data/0FB-\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZ\x97v\xa4\xff|^\xc9?\xce\xbc\x8cP\x8d\xb0\xe9\xbf\xdc\x0e\xef[\xb7'
b'\xdb\xd3\xbf4\xb1\x08Q\xf9\x00\xde?PK\x07\x08\xae\x92t\x0f \x00\x00\x00 \x00\x00'
b'\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x1c\x00\x16\x00tensor.double.LE.BOM/versionFB\x12\x00ZZ'
b'ZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02'
b'\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00S\xd3\xba&\x9b\x00\x00\x00\x9b\x00'
b'\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'tensor.double.LE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\xeb\x00\x00\x00tensor.double.LE.BOM/byteorderPK\x01'
b'\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xae\x92t\x0f '
b'\x00\x00\x00 \x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'V\x01\x00\x00tensor.double.LE.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00'
b'\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1c\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x01\x00\x00tensor.double.LE.BOM/versio'
b'nPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00*\x01\x00\x00'
b'\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00|\x03\x00'
b'\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00\x04\x00'
b'*\x01\x00\x00R\x02\x00\x00\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x19\x00\t\x00tensor.double.BE/data.pklFB\x05\x00ZZZZZ\x80\x02'
b'ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\n'
b'DoubleStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08S\xd3\xba&\x9b\x00\x00\x00\x9b\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x17\x00 \x00tensor.double.BE/data/0FB\x1c\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'?\xc9^|\xff\xa4v\x97\xbf\xe9\xb0\x8dP\x8c\xbc\xce\xbf\xd3\xdb\xb7[\xef\x0e\xdc?\xde'
b'\x00\xf9Q\x08\xb14PK\x07\x083@\x82/ \x00\x00\x00 \x00\x00\x00PK\x03\x04\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x18\x00\x1a\x00tensor.double.BE/versionFB\x16\x00ZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07'
b'\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00S\xd3\xba&\x9b\x00\x00\x00\x9b\x00\x00\x00\x19\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.double.BE/da'
b'ta.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x003@\x82/ '
b'\x00\x00\x00 \x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xeb\x00\x00\x00tensor.double.BE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00'
b'\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x18\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00p\x01\x00\x00tensor.double.BE/versionPK\x06\x06'
b',\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03'
b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xd2\x00\x00\x00\x00'
b'\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xa4\x02\x00'
b'\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00'
b'\xd2\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1d\x00\x05\x00tensor.double.BE.BOM/data.pklFB\x01\x00Z\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nDoubleStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08S\xd3\xba&\x9b\x00\x00\x00\x9b\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1e\x00\x19\x00tensor.double.BE.BOM/byteorderFB\x15\x00ZZZZZZZZZZZZZZZZ'
b'ZZZZZbigPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x1b\x004\x00tensor.double.BE.BOM/data/0FB0\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZ?\xc9^|\xff\xa4v\x97\xbf\xe9\xb0\x8dP\x8c\xbc\xce\xbf\xd3\xdb\xb7'
b'[\xef\x0e\xdc?\xde\x00\xf9Q\x08\xb14PK\x07\x083@\x82/ \x00\x00\x00 \x00\x00\x00'
b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x1c\x00\x16\x00tensor.double.BE.BOM/versionFB\x12\x00ZZZZZZZZ'
b'ZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00S\xd3\xba&\x9b\x00\x00\x00\x9b\x00\x00'
b'\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ten'
b'sor.double.BE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\xeb\x00\x00\x00tensor.double.BE.BOM/byteorderPK\x01\x02'
b'\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x003@\x82/ \x00\x00\x00 \x00\x00\x00'
b'\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00S\x01\x00\x00tensor.do'
b'uble.BE.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1'
b'\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\xf0\x01\x00\x00tensor.double.BE.BOM/versionPK\x06\x06,\x00\x00\x00'
b'\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00'
b'\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00*\x01\x00\x00\x00\x00\x00\x00R\x02'
b'\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00|\x03\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00\x04\x00*\x01\x00\x00R\x02\x00\x00'
b'\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
if sys.byteorder == 'little':
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_be_bom))
else:
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
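# --- Runnable form of the generation recipe in the comments above (a sketch;
# the file names are illustrative, and _disable_byteorder_record is the
# private flag the comments themselves use).
import textwrap

import torch

x = torch.randn(2, 2, dtype=torch.double)
torch.save(x, "tensor.double.LE.pt", _disable_byteorder_record=True)
torch.save(x, "tensor.double.LE.BOM.pt")

with open("tensor.double.LE.pt", "rb") as f:
    data = f.read()
print("\n".join(textwrap.wrap(str(data), 80)))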
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_float
|
def test_serialization_load_bom_data_float(self):
# 1. Generated on an LE system using the following commands:
#
# import torch
#
# x = torch.randn(2,2, dtype=torch.float)
#
# torch.save(x, "tensor.float.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.float.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it is resaved on a BE system with the following commands:
#
# import torch
#
# x = torch.load('tensor.float.LE.BOM.pt')
#
# torch.save(x, 'tensor.float.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.float.BE.BOM.pt')
#
# print(x)
#
# The following commands and a bit of manual work were used to produce Python bytes from the resulting files:
#
# file = open('filename', 'rb')
# data = file.read()
# file.close()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM in this context stands for Byte Order Mark.
#
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x18\x00\n\x00tensor.float.LE/data.pklFB\x06\x00ZZZZZZ\x80\x02'
b'ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\n'
b'FloatStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05Q'
b'K\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)'
b'Rq\ttq\nRq\x0b.PK\x07\x08%Y"N\x9a\x00\x00\x00\x9a\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16'
b'\x00"\x00tensor.float.LE/data/0FB\x1e\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\x01h\x9e'
b'?\r\xb7A?\x1a\x1e\x07\xbf\xd4|\x02?PK\x07\x08\x8fq]\x8c\x10\x00\x00\x00\x10\x00'
b'\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x17\x00+\x00tensor.float.LE/versionFB\'\x00ZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00'
b'\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00%Y"N\x9a\x00\x00'
b'\x00\x9a\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00tensor.float.LE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00'
b'\x00\x00\x00\x8fq]\x8c\x10\x00\x00\x00\x10\x00\x00\x00\x16\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\xea\x00\x00\x00tensor.float.LE/data/0PK\x01\x02'
b'\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00'
b'\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x01\x00\x00t'
b'ensor.float.LE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00'
b'\x00\x00\xcf\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00PK\x06'
b'\x07\x00\x00\x00\x00\xa1\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00'
b'\x00\x00\x00\x03\x00\x03\x00\xcf\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1c\x00\x06\x00tensor.float.LE.BOM/data.pklFB\x02\x00ZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nFloatStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08%Y"N\x9a\x00\x00\x00\x9a\x00\x00\x00PK\x03\x04\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x1d\x00\x1b\x00tensor.float.LE.BOM/byteorderFB\x17\x00ZZZZZZZZZZZZZZZZZZZZZZZl'
b'ittlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a'
b'\x002\x00tensor.float.LE.BOM/data/0FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZ\x01h\x9e?\r\xb7A?\x1a\x1e\x07\xbf\xd4|\x02?PK\x07\x08\x8fq]\x8c\x10\x00'
b'\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\'\x00tensor.float.LE.BOM/ve'
b'rsionFB#\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00'
b'\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00'
b'%Y"N\x9a\x00\x00\x00\x9a\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00tensor.float.LE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00\x1d'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xea\x00\x00\x00tensor.fl'
b'oat.LE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x8f'
b'q]\x8c\x10\x00\x00\x00\x10\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00V\x01\x00\x00tensor.float.LE.BOM/data/0PK\x01\x02\x00\x00\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1b\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x01\x00\x00tensor.float.'
b'LE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00'
b'&\x01\x00\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00'
b'\x00x\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04'
b'\x00\x04\x00&\x01\x00\x00R\x02\x00\x00\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x18\x00\n\x00tensor.float.BE/data.pklFB\x06\x00ZZZZZZ\x80\x02'
b'ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\n'
b'FloatStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05Q'
b'K\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)'
b'Rq\ttq\nRq\x0b.PK\x07\x08%Y"N\x9a\x00\x00\x00\x9a\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16'
b'\x00"\x00tensor.float.BE/data/0FB\x1e\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ?\x9eh'
b'\x01?A\xb7\r\xbf\x07\x1e\x1a?\x02|\xd4PK\x07\x089D\xd6\x8a\x10\x00\x00\x00\x10\x00'
b'\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x17\x00+\x00tensor.float.BE/versionFB\'\x00ZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00'
b'\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00%Y"N\x9a\x00\x00'
b'\x00\x9a\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00tensor.float.BE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00'
b'\x00\x00\x009D\xd6\x8a\x10\x00\x00\x00\x10\x00\x00\x00\x16\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\xea\x00\x00\x00tensor.float.BE/data/0PK\x01\x02'
b'\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00'
b'\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x01\x00\x00t'
b'ensor.float.BE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00'
b'\x00\x00\xcf\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00PK\x06'
b'\x07\x00\x00\x00\x00\xa1\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00'
b'\x00\x00\x00\x03\x00\x03\x00\xcf\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1c\x00\x06\x00tensor.float.BE.BOM/data.pklFB\x02\x00ZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nFloatStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08%Y"N\x9a\x00\x00\x00\x9a\x00\x00\x00PK\x03\x04\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x1d\x00\x1b\x00tensor.float.BE.BOM/byteorderFB\x17\x00ZZZZZZZZZZZZZZZZZZZZZZZb'
b'igPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x00'
b'5\x00tensor.float.BE.BOM/data/0FB1\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZ?\x9eh\x01?A\xb7\r\xbf\x07\x1e\x1a?\x02|\xd4PK\x07\x089D\xd6\x8a\x10\x00'
b'\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\'\x00tensor.float.BE.BOM/ve'
b'rsionFB#\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00'
b'\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00'
b'%Y"N\x9a\x00\x00\x00\x9a\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00tensor.float.BE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00\x1d'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xea\x00\x00\x00tensor.fl'
b'oat.BE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x009D'
b'\xd6\x8a\x10\x00\x00\x00\x10\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00S\x01\x00\x00tensor.float.BE.BOM/data/0PK\x01\x02\x00\x00\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1b\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x01\x00\x00tensor.float.'
b'BE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00'
b'&\x01\x00\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00'
b'\x00x\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04'
b'\x00\x04\x00&\x01\x00\x00R\x02\x00\x00\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
if sys.byteorder == 'little':
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_be_bom))
else:
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
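# --- Sketch: the "BOM" in these tests is just a 'byteorder' entry inside the
# saved zip archive, as visible in the byte dumps above; it can be inspected
# directly (the file name is illustrative).
import zipfile

with zipfile.ZipFile("tensor.float.LE.BOM.pt") as z:
    name = next(n for n in z.namelist() if n.endswith("/byteorder"))
    print(z.read(name))  # b'little' (or b'big' for BE saves)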
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_half
|
def test_serialization_load_bom_data_half(self):
# 1. Generated on an LE system using the following commands:
#
# import torch
#
# x = torch.randn(2,2, dtype=torch.half)
#
# torch.save(x, "tensor.half.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.half.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it is resaved on a BE system with the following commands:
#
# import torch
#
# x = torch.load('tensor.half.LE.BOM.pt')
#
# torch.save(x, 'tensor.half.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.half.BE.BOM.pt')
#
# print(x)
#
# The following commands and a bit of manual work were used to produce Python bytes from the resulting files:
#
# file = open('filename', 'rb')
# data = file.read()
# file.close()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM in this context stands for Byte Order Mark.
#
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x17\x00\x0b\x00tensor.half.LE/data.pklFB\x07\x00ZZZZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nHalfStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08E\xabQ\x8c\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x15\x00$\x00tensor.half.LE/data/0FB \x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ0'
b'\xbbf;\xcd\xbd\xab9PK\x07\x08,D\x96\x91\x08\x00\x00\x00\x08\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x16\x004\x00tensor.half.LE/versionFB0\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01'
b'\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00E\xabQ\x8c\x99\x00\x00\x00\x99'
b'\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00tensor.half.LE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00,D\x96\x91\x08\x00\x00\x00\x08\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.half.LE/data/0PK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00'
b'\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\x01\x00\x00tensor.ha'
b'lf.LE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00'
b'\xcc\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00'
b'\x00\x00\x9e\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00'
b'\x00\x03\x00\x03\x00\xcc\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.half.LE.BOM/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nHalfStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08E\xabQ\x8c\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1c\x00\x1d\x00tensor.half.LE.BOM/byteorderFB\x19\x00ZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZlittlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x19\x003\x00tensor.half.LE.BOM/data/0FB/\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZ0\xbbf;\xcd\xbd\xab9PK\x07\x08,D\x96\x91\x08\x00\x00\x00\x08\x00'
b'\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x1a\x000\x00tensor.half.LE.BOM/versionFB,\x00ZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00'
b'\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00E\xabQ\x8c\x99'
b'\x00\x00\x00\x99\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00tensor.half.LE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00\x1c\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.half.LE.'
b'BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00,D\x96\x91'
b'\x08\x00\x00\x00\x08\x00\x00\x00\x19\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00V\x01\x00\x00tensor.half.LE.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1a\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd8\x01\x00\x00tensor.half.LE.BOM/ve'
b'rsionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00"\x01\x00'
b'\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00t\x03'
b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00\x04'
b'\x00"\x01\x00\x00R\x02\x00\x00\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x17\x00\x0b\x00tensor.half.BE/data.pklFB\x07\x00ZZZZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nHalfStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08E\xabQ\x8c\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x15\x00$\x00tensor.half.BE/data/0FB \x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\xbb'
b'0;f\xbd\xcd9\xabPK\x07\x08\xc7\xa1\xfd\x07\x08\x00\x00\x00\x08\x00\x00\x00PK\x03'
b'\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x16\x004\x00tensor.half.BE/versionFB0\x00ZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00'
b'PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00E\xabQ\x8c\x99\x00\x00'
b'\x00\x99\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00tensor.half.BE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00'
b'\x00\x00\x00\xc7\xa1\xfd\x07\x08\x00\x00\x00\x08\x00\x00\x00\x15\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.half.BE/data/0PK\x01'
b'\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02'
b'\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\x01\x00\x00'
b'tensor.half.BE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00'
b'\x00\x00\x00\xcc\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00PK\x06'
b'\x07\x00\x00\x00\x00\x9e\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06'
b'\x00\x00\x00\x00\x03\x00\x03\x00\xcc\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.half.BE.BOM/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nHalfStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08E\xabQ\x8c\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1c\x00\x1d\x00tensor.half.BE.BOM/byteorderFB\x19\x00ZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZbigPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x19\x006\x00tensor.half.BE.BOM/data/0FB2\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZ\xbb0;f\xbd\xcd9\xabPK\x07\x08\xc7\xa1\xfd\x07\x08\x00\x00\x00\x08'
b'\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x000\x00tensor.half.BE.BOM/versionFB,\x00ZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00'
b'\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00E\xab'
b'Q\x8c\x99\x00\x00\x00\x99\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00tensor.half.BE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00\x1c'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.ha'
b'lf.BE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xc7'
b'\xa1\xfd\x07\x08\x00\x00\x00\x08\x00\x00\x00\x19\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00S\x01\x00\x00tensor.half.BE.BOM/data/0PK\x01\x02\x00\x00\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1a'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd8\x01\x00\x00tensor.ha'
b'lf.BE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00'
b'\x00"\x01\x00\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00'
b'\x00\x00t\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00'
b'\x04\x00\x04\x00"\x01\x00\x00R\x02\x00\x00\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
if sys.byteorder == 'little':
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_be_bom))
else:
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_long
|
def test_serialization_load_bom_data_long(self):
# 1. Generated on an LE system using the following commands:
#
# import torch
#
# x = torch.randint(-4294967295, 4294967295, [4, 4], dtype=torch.long)
#
# torch.save(x, "tensor.long.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.long.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it is resaved on a BE system with the following commands:
#
# import torch
#
# x = torch.load('tensor.long.LE.BOM.pt')
#
# torch.save(x, 'tensor.long.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.long.BE.BOM.pt')
#
# print(x)
#
# The following commands and a bit of manual work were used to produce Python bytes from the resulting files:
#
# file = open('filename', 'rb')
# data = file.read()
# file.close()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM in this context stands for Byte Order Mark.
#
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x17\x00\x0b\x00tensor.long.LE/data.pklFB\x07\x00ZZZZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nLongStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08 \xbd\xd7\xb0\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x15\x00$\x00tensor.long.LE/data/0FB \x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZl\xfa\xda\xbe\x00\x00\x00\x00GQ^\xa9\xff\xff\xff\xff\xc5\xa4\x19\xa4\x00\x00\x00'
b'\x00\xda\x9f\x04\xdd\xff\xff\xff\xff\x9b\xfc\x98\r\x00\x00\x00\x00\x8e\xb3\xb6'
b'=\x00\x00\x00\x00n}\xd2\x8f\xff\xff\xff\xff\xe2\xfe\x14u\xff\xff\xff\xff\xf1\x01'
b'T\x07\xff\xff\xff\xff\x9b\xb3"\x7f\xff\xff\xff\xff\xb2p\x07\xfc\xff\xff\xff\xff\x1f'
b'1\xa6M\x00\x00\x00\x00a\xaa|u\xff\xff\xff\xff2Y\x12;\x00\x00\x00\x00\'J\xb7\xcb'
b'\x00\x00\x00\x00m\xb2\x1c\xe1\xff\xff\xff\xffPK\x07\x08\xd5\x00\xa1r\x80\x00\x00'
b'\x00\x80\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00<\x00tensor.long.LE/versionFB8\x00'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9eg'
b'U\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00 \xbd\xd7\xb0\x99\x00\x00\x00\x99\x00\x00\x00\x17\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.long.LE/data.pklPK\x01\x02'
b'\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd5\x00\xa1r\x80\x00\x00\x00\x80'
b'\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00'
b'\x00tensor.long.LE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\xd0\x01\x00\x00tensor.long.LE/versionPK\x06\x06,\x00\x00'
b'\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00'
b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00\x00\x00'
b'R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\x1e\x03\x00\x00\x00\x00'
b'\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xcc\x00\x00\x00'
b'R\x02\x00\x00\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.long.LE.BOM/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nLongStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08 \xbd\xd7\xb0\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1c\x00\x1d\x00tensor.long.LE.BOM/byteorderFB\x19\x00ZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZlittlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x19\x003\x00tensor.long.LE.BOM/data/0FB/\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZl\xfa\xda\xbe\x00\x00\x00\x00GQ^\xa9\xff\xff\xff\xff\xc5\xa4\x19'
b'\xa4\x00\x00\x00\x00\xda\x9f\x04\xdd\xff\xff\xff\xff\x9b\xfc\x98\r\x00\x00\x00'
b'\x00\x8e\xb3\xb6=\x00\x00\x00\x00n}\xd2\x8f\xff\xff\xff\xff\xe2\xfe\x14u\xff\xff'
b'\xff\xff\xf1\x01T\x07\xff\xff\xff\xff\x9b\xb3"\x7f\xff\xff\xff\xff\xb2p\x07\xfc'
b'\xff\xff\xff\xff\x1f1\xa6M\x00\x00\x00\x00a\xaa|u\xff\xff\xff\xff2Y\x12;\x00\x00'
b'\x00\x00\'J\xb7\xcb\x00\x00\x00\x00m\xb2\x1c\xe1\xff\xff\xff\xffPK\x07\x08\xd5\x00'
b'\xa1r\x80\x00\x00\x00\x80\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x008\x00tensor.lon'
b'g.LE.BOM/versionFB4\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK'
b'\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00 \xbd\xd7\xb0\x99\x00\x00\x00\x99\x00\x00\x00\x1b\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.long.LE.'
b'BOM/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x85=\xe3\x19'
b'\x06\x00\x00\x00\x06\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\xe9\x00\x00\x00tensor.long.LE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\xd5\x00\xa1r\x80\x00\x00\x00\x80\x00\x00\x00\x19'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00V\x01\x00\x00tensor.long.L'
b'E.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU'
b'\x02\x00\x00\x00\x02\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00P\x02\x00\x00tensor.long.LE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00'
b'\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00'
b'\x04\x00\x00\x00\x00\x00\x00\x00"\x01\x00\x00\x00\x00\x00\x00\xd2\x02\x00\x00'
b'\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xf4\x03\x00\x00\x00\x00\x00\x00\x01\x00'
b'\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00\x04\x00"\x01\x00\x00\xd2\x02\x00\x00'
b'\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x17\x00\x0b\x00tensor.long.BE/data.pklFB\x07\x00ZZZZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nLongStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08 \xbd\xd7\xb0\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x15\x00$\x00tensor.long.BE/data/0FB \x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZ\x00\x00\x00\x00\xbe\xda\xfal\xff\xff\xff\xff\xa9^QG\x00\x00\x00\x00\xa4\x19\xa4'
b'\xc5\xff\xff\xff\xff\xdd\x04\x9f\xda\x00\x00\x00\x00\r\x98\xfc\x9b\x00\x00\x00'
b'\x00=\xb6\xb3\x8e\xff\xff\xff\xff\x8f\xd2}n\xff\xff\xff\xffu\x14\xfe\xe2\xff\xff'
b'\xff\xff\x07T\x01\xf1\xff\xff\xff\xff\x7f"\xb3\x9b\xff\xff\xff\xff\xfc\x07p\xb2\x00'
b'\x00\x00\x00M\xa61\x1f\xff\xff\xff\xffu|\xaaa\x00\x00\x00\x00;\x12Y2\x00\x00\x00'
b'\x00\xcb\xb7J\'\xff\xff\xff\xff\xe1\x1c\xb2mPK\x07\x08\xb9\x1b\x81j\x80\x00\x00'
b'\x00\x80\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00<\x00tensor.long.BE/versionFB8\x00'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9eg'
b'U\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00 \xbd\xd7\xb0\x99\x00\x00\x00\x99\x00\x00\x00\x17\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.long.BE/data.pklPK\x01\x02'
b'\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xb9\x1b\x81j\x80\x00\x00\x00\x80'
b'\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00'
b'\x00tensor.long.BE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\xd0\x01\x00\x00tensor.long.BE/versionPK\x06\x06,\x00\x00'
b'\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00'
b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00\x00\x00'
b'R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\x1e\x03\x00\x00\x00\x00'
b'\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xcc\x00\x00\x00'
b'R\x02\x00\x00\x00\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.long.BE.BOM/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nLongStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08 \xbd\xd7\xb0\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1c\x00\x1d\x00tensor.long.BE.BOM/byteorderFB\x19\x00ZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZbigPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x19\x006\x00tensor.long.BE.BOM/data/0FB2\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZ\x00\x00\x00\x00\xbe\xda\xfal\xff\xff\xff\xff\xa9^QG\x00\x00\x00'
b'\x00\xa4\x19\xa4\xc5\xff\xff\xff\xff\xdd\x04\x9f\xda\x00\x00\x00\x00\r\x98\xfc'
b'\x9b\x00\x00\x00\x00=\xb6\xb3\x8e\xff\xff\xff\xff\x8f\xd2}n\xff\xff\xff\xffu\x14'
b'\xfe\xe2\xff\xff\xff\xff\x07T\x01\xf1\xff\xff\xff\xff\x7f"\xb3\x9b\xff\xff\xff\xff'
b'\xfc\x07p\xb2\x00\x00\x00\x00M\xa61\x1f\xff\xff\xff\xffu|\xaaa\x00\x00\x00\x00'
b';\x12Y2\x00\x00\x00\x00\xcb\xb7J\'\xff\xff\xff\xff\xe1\x1c\xb2mPK\x07\x08\xb9\x1b'
b'\x81j\x80\x00\x00\x00\x80\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x008\x00tensor.lon'
b'g.BE.BOM/versionFB4\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK'
b'\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00 \xbd\xd7\xb0\x99\x00\x00\x00\x99\x00\x00\x00\x1b\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.long.BE.'
b'BOM/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00I\xe2\xfb\xd3'
b'\x03\x00\x00\x00\x03\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\xe9\x00\x00\x00tensor.long.BE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\xb9\x1b\x81j\x80\x00\x00\x00\x80\x00\x00\x00\x19'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00S\x01\x00\x00tensor.long.B'
b'E.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU'
b'\x02\x00\x00\x00\x02\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00P\x02\x00\x00tensor.long.BE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00'
b'\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00'
b'\x04\x00\x00\x00\x00\x00\x00\x00"\x01\x00\x00\x00\x00\x00\x00\xd2\x02\x00\x00'
b'\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xf4\x03\x00\x00\x00\x00\x00\x00\x01\x00'
b'\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00\x04\x00"\x01\x00\x00\xd2\x02\x00\x00'
b'\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
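# Without a BOM record, a NATIVE-endianness load interprets the payload in
# the host's byte order, so only the file whose byte order matches the host
# round-trips to the original values.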
if sys.byteorder == 'little':
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_be_bom))
else:
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_int
|
def test_serialization_load_bom_data_int(self):
# 1. Generated on an LE system using the following commands:
#
# import torch
#
# x = torch.randint(-2147483648, 2147483648, [4, 4], dtype=torch.int)
#
# torch.save(x, "tensor.int.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.int.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it was resaved on a BE system using the following commands:
#
# import torch
#
# x = torch.load('tensor.int.LE.BOM.pt')
#
# torch.save(x, 'tensor.int.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.int.BE.BOM.pt')
#
# print(x)
#
# The following commands and a bit of manual work were used to produce the Python bytes from the resulting files:
#
# file = open('filename', 'rb')
# data = file.read()
# file.close()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM here stands for Byte Order Mark.
#
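# For reference, the same four bytes decode to different integers depending
# on the assumed byte order, which is exactly what the assertions below
# exercise:
#
# int.from_bytes(b'\x01\x00\x00\x00', 'little')  # -> 1
# int.from_bytes(b'\x01\x00\x00\x00', 'big')     # -> 16777216
#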
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x16\x00\x0c\x00tensor.int.LE/data.pklFB\x08\x00ZZZZZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nIntStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05Q'
b'K\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)'
b'Rq\ttq\nRq\x0b.PK\x07\x08\xdd\xa0\'\xa8\x98\x00\x00\x00\x98\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x14\x00&\x00tensor.int.LE/data/0FB"\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZ\xf6\x19\x95i\xfaL\x1f\t%\xa3\r\xb8\xe5\xcfN\xe2\xa2\xc7\x8f\xb4\xfd\xf5(2\xe3'
b'YX\xf5\x1dhO}\xeb\xba\xcf\x02\x8b\x84\xdd>L\xbc(\xc7\x92Q\x98\xa6\x1aQ^w\xea\x93'
b'2>\xad\x87D\xdd\x9el\xb6\x15PK\x07\x08W\x1c\xcd\x19@\x00\x00\x00@\x00\x00\x00PK'
b'\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x15\x00=\x00tensor.int.LE/versionFB9\x00ZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00'
b'\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xdd\xa0\'\xa8'
b'\x98\x00\x00\x00\x98\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00tensor.int.LE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00W\x1c\xcd\x19@\x00\x00\x00@\x00\x00\x00\x14\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x00\x00\x00tensor.int.LE/data/0PK\x01'
b'\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00'
b'\x02\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x01'
b'\x00\x00tensor.int.LE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00'
b'\x00\x00\x00\x00\xc9\x00\x00\x00\x00\x00\x00\x00\x12\x02\x00\x00\x00\x00\x00\x00'
b'PK\x06\x07\x00\x00\x00\x00\xdb\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05'
b'\x06\x00\x00\x00\x00\x03\x00\x03\x00\xc9\x00\x00\x00\x12\x02\x00\x00\x00\x00')
data_le_bom = (b"PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x1a\x00\x08\x00tensor.int.LE.BOM/data.pklFB\x04\x00ZZZZ\x80"
b"\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc"
b"h\nIntStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05Q"
b"K\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)"
b"Rq\ttq\nRq\x0b.PK\x07\x08\xdd\xa0'\xa8\x98\x00\x00\x00\x98\x00\x00\x00PK\x03\x04"
b"\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x1b\x00\x1f\x00tensor.int.LE.BOM/byteorderFB\x1b\x00ZZZZZZZZZZZZZZZZZZZ"
b"ZZZZZZZZlittlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00"
b"\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x18\x004\x00tensor.int.LE.BOM/data/0FB0\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"
b"ZZZZZZZZZZZZZZZZZZZ\xf6\x19\x95i\xfaL\x1f\t%\xa3\r\xb8\xe5\xcfN\xe2\xa2\xc7\x8f\xb4"
b"\xfd\xf5(2\xe3YX\xf5\x1dhO}\xeb\xba\xcf\x02\x8b\x84\xdd>L\xbc(\xc7\x92Q\x98\xa6"
b"\x1aQ^w\xea\x932>\xad\x87D\xdd\x9el\xb6\x15PK\x07\x08W\x1c\xcd\x19@\x00\x00\x00"
b"@\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x19\x009\x00tensor.int.LE.BOM/versionFB5\x00ZZZ"
b"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00"
b"\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00"
b"\xdd\xa0'\xa8\x98\x00\x00\x00\x98\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.int.LE.BOM/data.pklPK\x01\x02\x00"
b"\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00"
b"\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x00\x00\x00"
b"tensor.int.LE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00"
b"\x00W\x1c\xcd\x19@\x00\x00\x00@\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00V\x01\x00\x00tensor.int.LE.BOM/data/0PK\x01\x02\x00\x00\x00"
b"\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x19"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x02\x00\x00tensor.int"
b".LE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00"
b"\x1e\x01\x00\x00\x00\x00\x00\x00\x92\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00"
b"\x00\x00\x00\xb0\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00"
b"\x00\x04\x00\x04\x00\x1e\x01\x00\x00\x92\x02\x00\x00\x00\x00")
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x16\x00\x0c\x00tensor.int.BE/data.pklFB\x08\x00ZZZZZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nIntStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05Q'
b'K\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)'
b'Rq\ttq\nRq\x0b.PK\x07\x08\xdd\xa0\'\xa8\x98\x00\x00\x00\x98\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x14\x00&\x00tensor.int.BE/data/0FB"\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZi\x95\x19\xf6\t\x1fL\xfa\xb8\r\xa3%\xe2N\xcf\xe5\xb4\x8f\xc7\xa22(\xf5\xfd\xf5'
b'XY\xe3}Oh\x1d\x02\xcf\xba\xeb>\xdd\x84\x8b\xc7(\xbcL\xa6\x98Q\x92w^Q\x1a>2\x93\xea'
b'\xddD\x87\xad\x15\xb6l\x9ePK\x07\x08rq\x19^@\x00\x00\x00@\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x15\x00=\x00tensor.int.BE/versionFB9\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00'
b'PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xdd\xa0\'\xa8\x98\x00'
b'\x00\x00\x98\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00tensor.int.BE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00'
b'\x00\x00\x00\x00rq\x19^@\x00\x00\x00@\x00\x00\x00\x14\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\xe8\x00\x00\x00tensor.int.BE/data/0PK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00'
b'\x00\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x01\x00\x00tens'
b'or.int.BE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00'
b'\x00\xc9\x00\x00\x00\x00\x00\x00\x00\x12\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00'
b'\x00\x00\x00\xdb\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00'
b'\x00\x00\x03\x00\x03\x00\xc9\x00\x00\x00\x12\x02\x00\x00\x00\x00')
data_be_bom = (b"PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x1a\x00\x08\x00tensor.int.BE.BOM/data.pklFB\x04\x00ZZZZ\x80"
b"\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc"
b"h\nIntStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05Q"
b"K\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)"
b"Rq\ttq\nRq\x0b.PK\x07\x08\xdd\xa0'\xa8\x98\x00\x00\x00\x98\x00\x00\x00PK\x03\x04"
b"\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x1b\x00\x1f\x00tensor.int.BE.BOM/byteorderFB\x1b\x00ZZZZZZZZZZZZZZZZZZZ"
b"ZZZZZZZZbigPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00"
b"\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x18\x007\x00tensor.int.BE.BOM/data/0FB3\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"
b"ZZZZZZZZZZZZZZZZZZZi\x95\x19\xf6\t\x1fL\xfa\xb8\r\xa3%\xe2N\xcf\xe5\xb4\x8f\xc7\xa2"
b"2(\xf5\xfd\xf5XY\xe3}Oh\x1d\x02\xcf\xba\xeb>\xdd\x84\x8b\xc7(\xbcL\xa6\x98Q\x92"
b"w^Q\x1a>2\x93\xea\xddD\x87\xad\x15\xb6l\x9ePK\x07\x08rq\x19^@\x00\x00\x00@\x00"
b"\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x19\x009\x00tensor.int.BE.BOM/versionFB5\x00ZZZZZZZZZ"
b"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00"
b"\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xdd"
b"\xa0'\xa8\x98\x00\x00\x00\x98\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00tensor.int.BE.BOM/data.pklPK\x01\x02\x00\x00\x00"
b"\x00\x08\x08\x00\x00\x00\x00\x00\x00I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00"
b"\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x00\x00\x00tenso"
b"r.int.BE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00"
b"rq\x19^@\x00\x00\x00@\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00S\x01\x00\x00tensor.int.BE.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08"
b"\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x19\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x02\x00\x00tensor.int.BE.BOM/vers"
b"ionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x1e\x01\x00"
b"\x00\x00\x00\x00\x00\x92\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00"
b"\xb0\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00"
b"\x04\x00\x1e\x01\x00\x00\x92\x02\x00\x00\x00\x00")
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
if sys.byteorder == 'little':
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_be_bom))
else:
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_int16
|
def test_serialization_load_bom_data_int16(self):
# 1. Generated on an LE system using the following commands:
#
# import torch
#
# x = torch.randint(-32768, 32768, [4, 4], dtype=torch.int16)
#
# torch.save(x, "tensor.int16.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.int16.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it was resaved on a BE system using the following commands:
#
# import torch
#
# x = torch.load('tensor.int16.LE.BOM.pt')
#
# torch.save(x, 'tensor.int16.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.int16.BE.BOM.pt')
#
# print(x)
#
# The following commands and a bit of manual work were used to produce the Python bytes from the resulting files:
#
# file = open('filename', 'rb')
# data = file.read()
# file.close()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM here stands for Byte Order Mark.
#
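# For 2-byte values the byteswap is a pairwise reversal; a minimal
# illustration with struct ('<h' = little-endian int16, '>h' = big-endian):
#
# import struct
# struct.unpack('<h', b'\x01\x00')  # -> (1,)
# struct.unpack('>h', b'\x01\x00')  # -> (256,)
#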
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x18\x00\n\x00tensor.int16.LE/data.pklFB\x06\x00ZZZZZZ\x80\x02'
b'ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\n'
b'ShortStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05Q'
b'K\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)'
b'Rq\ttq\nRq\x0b.PK\x07\x08\xf6\xc8K\xd8\x9a\x00\x00\x00\x9a\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x16\x00"\x00tensor.int16.LE/data/0FB\x1e\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZO\xa4\x9bJ_Z-\xa5#\xf1y\xef\xb1@\x061"\xe3\x83\x07;\x83\x80\x08\xf1\x18q\xf6\xfe'
b'\xf3\xc9,PK\x07\x08\xa0\x98\xd9\xdf \x00\x00\x00 \x00\x00\x00PK\x03\x04\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x17\x00\x1b\x00tensor.int16.LE/versionFB\x17\x00ZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07'
b'\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\xf6\xc8K\xd8\x9a\x00\x00\x00\x9a\x00\x00\x00\x18\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.int16.LE/'
b'data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xa0\x98\xd9\xdf'
b' \x00\x00\x00 \x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\xea\x00\x00\x00tensor.int16.LE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x17\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00p\x01\x00\x00tensor.int16.LE/versionPK\x06'
b'\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03'
b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00'
b'\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xa1\x02'
b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00'
b'\xcf\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1c\x00\x06\x00tensor.int16.LE.BOM/data.pklFB\x02\x00ZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nShortStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\xf6\xc8K\xd8\x9a\x00\x00\x00\x9a\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1d\x00\x1b\x00tensor.int16.LE.BOM/byteorderFB\x17\x00ZZZZZZZZZZZZZZZ'
b'ZZZZZZZZlittlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1a\x002\x00tensor.int16.LE.BOM/data/0FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZO\xa4\x9bJ_Z-\xa5#\xf1y\xef\xb1@\x061"\xe3\x83\x07;\x83\x80\x08'
b'\xf1\x18q\xf6\xfe\xf3\xc9,PK\x07\x08\xa0\x98\xd9\xdf \x00\x00\x00 \x00\x00\x00'
b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x1b\x00\x17\x00tensor.int16.LE.BOM/versionFB\x13\x00ZZZZZZZZZ'
b'ZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xf6\xc8K\xd8\x9a\x00\x00\x00\x9a\x00'
b'\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'tensor.int16.LE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\xea\x00\x00\x00tensor.int16.LE.BOM/byteorderPK\x01\x02'
b'\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xa0\x98\xd9\xdf \x00\x00\x00 '
b'\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00V\x01\x00\x00'
b'tensor.int16.LE.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\xf0\x01\x00\x00tensor.int16.LE.BOM/versionPK\x06\x06,\x00'
b'\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00'
b'\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00&\x01\x00\x00\x00\x00\x00\x00'
b'R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00x\x03\x00\x00\x00\x00\x00'
b'\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00\x04\x00&\x01\x00\x00R\x02'
b'\x00\x00\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x18\x00\n\x00tensor.int16.BE/data.pklFB\x06\x00ZZZZZZ\x80\x02'
b'ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\n'
b'ShortStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05Q'
b'K\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)'
b'Rq\ttq\nRq\x0b.PK\x07\x08\xf6\xc8K\xd8\x9a\x00\x00\x00\x9a\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x16\x00"\x00tensor.int16.BE/data/0FB\x1e\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZ\xa4OJ\x9bZ_\xa5-\xf1#\xefy@\xb11\x06\xe3"\x07\x83\x83;\x08\x80\x18\xf1\xf6q\xf3'
b'\xfe,\xc9PK\x07\x08\x8a\xeb\x9b[ \x00\x00\x00 \x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x17'
b'\x00\x1b\x00tensor.int16.BE/versionFB\x17\x00ZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07'
b'\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\xf6\xc8K\xd8\x9a\x00\x00\x00\x9a\x00\x00\x00\x18\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.int16.BE/dat'
b'a.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x8a\xeb\x9b[ '
b'\x00\x00\x00 \x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xea\x00\x00\x00tensor.int16.BE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00'
b'\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x17\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00p\x01\x00\x00tensor.int16.BE/versionPK\x06\x06'
b',\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00'
b'\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00\x00'
b'\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xa1\x02\x00'
b'\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xcf'
b'\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1c\x00\x06\x00tensor.int16.BE.BOM/data.pklFB\x02\x00ZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nShortStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\xf6\xc8K\xd8\x9a\x00\x00\x00\x9a\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1d\x00\x1b\x00tensor.int16.BE.BOM/byteorderFB\x17\x00ZZZZZZZZZZZZZZZ'
b'ZZZZZZZZbigPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1a\x005\x00tensor.int16.BE.BOM/data/0FB1\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZ\xa4OJ\x9bZ_\xa5-\xf1#\xefy@\xb11\x06\xe3"\x07\x83\x83;\x08\x80'
b'\x18\xf1\xf6q\xf3\xfe,\xc9PK\x07\x08\x8a\xeb\x9b[ \x00\x00\x00 \x00\x00\x00PK\x03'
b'\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x1b\x00\x17\x00tensor.int16.BE.BOM/versionFB\x13\x00ZZZZZZZZZZZZ'
b'ZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xf6\xc8K\xd8\x9a\x00\x00\x00\x9a\x00\x00'
b'\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ten'
b'sor.int16.BE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00'
b'I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\xea\x00\x00\x00tensor.int16.BE.BOM/byteorderPK\x01\x02\x00'
b'\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x8a\xeb\x9b[ \x00\x00\x00 \x00\x00'
b'\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00S\x01\x00\x00tenso'
b'r.int16.BE.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1'
b'\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\xf0\x01\x00\x00tensor.int16.BE.BOM/versionPK\x06\x06,\x00\x00\x00'
b'\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00'
b'\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00&\x01\x00\x00\x00\x00\x00\x00R\x02'
b'\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00x\x03\x00\x00\x00\x00\x00\x00'
b'\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00\x04\x00&\x01\x00\x00R\x02\x00'
b'\x00\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
if sys.byteorder == 'little':
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_be_bom))
else:
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_int8
|
def test_serialization_load_bom_data_int8(self):
# 1. Generated on an LE system using the following commands:
#
# import torch
#
# x = torch.randint(-128, 128, [4, 4], dtype=torch.int8)
#
# torch.save(x, "tensor.int8.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.int8.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it was resaved on a BE system using the following commands:
#
# import torch
#
# x = torch.load('tensor.int8.LE.BOM.pt')
#
# torch.save(x, 'tensor.int8.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.int8.BE.BOM.pt')
#
# print(x)
#
# The following commands and a bit of manual work were used to produce the Python bytes from the resulting files:
#
# file = open('filename', 'rb')
# data = file.read()
# file.close()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM here stands for Byte Order Mark.
#
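# Note: int8 elements occupy a single byte, so byte order cannot affect
# them; every load below is expected to yield identical values regardless
# of the BOM record or the default load endianness.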
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x17\x00\x0b\x00tensor.int8.LE/data.pklFB\x07\x00ZZZZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nCharStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\xdb6\x08\xe7\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x15\x00$\x00tensor.int8.LE/data/0FB \x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZ\x9d\x1en\xb4\xe0l"s\x15bs\x8aa\xa0\xc6+PK\x07\x08\xe0\xffgs\x10\x00\x00\x00\x10'
b'\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00,\x00tensor.int8.LE/versionFB(\x00ZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00'
b'\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xdb6\x08\xe7'
b'\x99\x00\x00\x00\x99\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00tensor.int8.LE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\xe0\xffgs\x10\x00\x00\x00\x10\x00\x00\x00\x15\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.int8.LE/data/0'
b'PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00'
b'\x00\x02\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x01'
b'\x00\x00tensor.int8.LE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00'
b'\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00'
b'PK\x06\x07\x00\x00\x00\x00\x9e\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05'
b'\x06\x00\x00\x00\x00\x03\x00\x03\x00\xcc\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.int8.LE.BOM/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nCharStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\xdb6\x08\xe7\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1c\x00\x1d\x00tensor.int8.LE.BOM/byteorderFB\x19\x00ZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZlittlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x19\x003\x00tensor.int8.LE.BOM/data/0FB/\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZ\x9d\x1en\xb4\xe0l"s\x15bs\x8aa\xa0\xc6+PK\x07\x08\xe0\xffgs\x10'
b'\x00\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x00(\x00tensor.int8.LE.BOM'
b'/versionFB$\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00'
b'\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\xdb6\x08\xe7\x99\x00\x00\x00\x99\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.int8.LE.BOM/data.pklPK\x01\x02\x00'
b'\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00'
b'\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00'
b'tensor.int8.LE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\xe0\xffgs\x10\x00\x00\x00\x10\x00\x00\x00\x19\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00V\x01\x00\x00tensor.int8.LE.BOM/data/0PK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00'
b'\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x01\x00\x00ten'
b'sor.int8.LE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00'
b'\x00\x00"\x01\x00\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00'
b'\x00\x00\x00t\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00'
b'\x00\x04\x00\x04\x00"\x01\x00\x00R\x02\x00\x00\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x17\x00\x0b\x00tensor.int8.BE/data.pklFB\x07\x00ZZZZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nCharStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\xdb6\x08\xe7\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x15\x00$\x00tensor.int8.BE/data/0FB \x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZ\x9d\x1en\xb4\xe0l"s\x15bs\x8aa\xa0\xc6+PK\x07\x08\xe0\xffgs\x10\x00\x00\x00\x10'
b'\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00,\x00tensor.int8.BE/versionFB(\x00ZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00'
b'\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xdb6\x08\xe7'
b'\x99\x00\x00\x00\x99\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00tensor.int8.BE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\xe0\xffgs\x10\x00\x00\x00\x10\x00\x00\x00\x15\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.int8.BE/data/0'
b'PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00'
b'\x00\x02\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\x01'
b'\x00\x00tensor.int8.BE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00'
b'\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00'
b'PK\x06\x07\x00\x00\x00\x00\x9e\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05'
b'\x06\x00\x00\x00\x00\x03\x00\x03\x00\xcc\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.int8.BE.BOM/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nCharStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\xdb6\x08\xe7\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1c\x00\x1d\x00tensor.int8.BE.BOM/byteorderFB\x19\x00ZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZbigPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x19\x006\x00tensor.int8.BE.BOM/data/0FB2\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZ\x9d\x1en\xb4\xe0l"s\x15bs\x8aa\xa0\xc6+PK\x07\x08\xe0\xffgs\x10'
b'\x00\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x00(\x00tensor.int8.BE.BOM'
b'/versionFB$\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00'
b'\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\xdb6\x08\xe7\x99\x00\x00\x00\x99\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.int8.BE.BOM/data.pklPK\x01\x02\x00'
b'\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00'
b'\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00'
b'tensor.int8.BE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\xe0\xffgs\x10\x00\x00\x00\x10\x00\x00\x00\x19\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00S\x01\x00\x00tensor.int8.BE.BOM/data/0PK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00'
b'\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x01\x00\x00ten'
b'sor.int8.BE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00'
b'\x00\x00"\x01\x00\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00'
b'\x00\x00\x00t\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00'
b'\x00\x04\x00\x04\x00"\x01\x00\x00R\x02\x00\x00\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
# 1-byte types are the same on BE and LE, so every combination below must match
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_uint8
|
def test_serialization_load_bom_data_uint8(self):
# 1. Generated on an LE system using the following commands:
#
# import torch
#
# x = torch.randint(0, 256, [4, 4], dtype=torch.uint8)
#
# torch.save(x, "tensor.uint8.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.uint8.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it was resaved on a BE system using the following commands:
#
# import torch
#
# x = torch.load('tensor.uint8.LE.BOM.pt')
#
# torch.save(x, 'tensor.uint8.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.uint8.BE.BOM.pt')
#
# print(x)
#
# The following commands and a bit of manual work were used to produce the Python bytes from the resulting files:
#
# file = open('filename', 'rb')
# data = file.read()
# file.close()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM here stands for Byte Order Mark.
#
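# As with int8, uint8 is a 1-byte dtype: byte order is irrelevant, and the
# BOM and no-BOM payloads should load to equal tensors under any default
# load endianness.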
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x18\x00\n\x00tensor.uint8.LE/data.pklFB\x06\x00ZZZZZZ\x80\x02'
b'ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\n'
b'ByteStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05QK'
b'\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)R'
b'q\ttq\nRq\x0b.PK\x07\x08\xff\xb9!\x97\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x16\x00#\x00tensor.uint8.LE/data/0FB\x1f\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZ\xf7\xf20\x04\t\x8a!\xbev\xf4\xbe\x0e";\xbb\tPK\x07\x08\xa8\x94#\x08\x10\x00\x00'
b'\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x17\x00+\x00tensor.uint8.LE/versionFB\''
b'\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00'
b'\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xff'
b'\xb9!\x97\x99\x00\x00\x00\x99\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00tensor.uint8.LE/data.pklPK\x01\x02\x00\x00\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\xa8\x94#\x08\x10\x00\x00\x00\x10\x00\x00\x00'
b'\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.'
b'uint8.LE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9e'
b'gU\x02\x00\x00\x00\x02\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00`\x01\x00\x00tensor.uint8.LE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00'
b'\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00'
b'\x03\x00\x00\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00'
b'\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xa1\x02\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xcf\x00\x00\x00\xd2\x01\x00'
b'\x00\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1c\x00\x06\x00tensor.uint8.LE.BOM/data.pklFB\x02\x00ZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nByteStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\xff\xb9!\x97\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1d\x00\x1c\x00tensor.uint8.LE.BOM/byteorderFB\x18\x00ZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZlittlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1a\x002\x00tensor.uint8.LE.BOM/data/0FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZ\xf7\xf20\x04\t\x8a!\xbev\xf4\xbe\x0e";\xbb\tPK\x07\x08\xa8\x94'
b'#\x08\x10\x00\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\'\x00tensor.ui'
b'nt8.LE.BOM/versionFB#\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9e'
b'gU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00'
b'\x00\x00\x00\xff\xb9!\x97\x99\x00\x00\x00\x99\x00\x00\x00\x1c\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.uint8.LE.BOM/data.pklPK'
b'\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x85=\xe3\x19\x06\x00\x00'
b'\x00\x06\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9'
b'\x00\x00\x00tensor.uint8.LE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\xa8\x94#\x08\x10\x00\x00\x00\x10\x00\x00\x00\x1a\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00V\x01\x00\x00tensor.uint8.LE.BOM/data/0'
b'PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00'
b'\x00\x02\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0'
b'\x01\x00\x00tensor.uint8.LE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e'
b'\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00'
b'\x00\x00\x00\x00\x00\x00&\x01\x00\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00'
b'\x00PK\x06\x07\x00\x00\x00\x00x\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05'
b'\x06\x00\x00\x00\x00\x04\x00\x04\x00&\x01\x00\x00R\x02\x00\x00\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x18\x00\n\x00tensor.uint8.BE/data.pklFB\x06\x00ZZZZZZ\x80\x02'
b'ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\n'
b'ByteStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05QK'
b'\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)R'
b'q\ttq\nRq\x0b.PK\x07\x08\xff\xb9!\x97\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x16\x00#\x00tensor.uint8.BE/data/0FB\x1f\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZ\xf7\xf20\x04\t\x8a!\xbev\xf4\xbe\x0e";\xbb\tPK\x07\x08\xa8\x94#\x08\x10\x00\x00'
b'\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x17\x00+\x00tensor.uint8.BE/versionFB\''
b'\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00'
b'\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xff'
b'\xb9!\x97\x99\x00\x00\x00\x99\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00tensor.uint8.BE/data.pklPK\x01\x02\x00\x00\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\xa8\x94#\x08\x10\x00\x00\x00\x10\x00\x00\x00'
b'\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.'
b'uint8.BE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9e'
b'gU\x02\x00\x00\x00\x02\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00`\x01\x00\x00tensor.uint8.BE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00'
b'\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00'
b'\x03\x00\x00\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00'
b'\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xa1\x02\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xcf\x00\x00\x00\xd2\x01\x00'
b'\x00\x00\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1c\x00\x06\x00tensor.uint8.BE.BOM/data.pklFB\x02\x00ZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nByteStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\xff\xb9!\x97\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x1d\x00\x1c\x00tensor.uint8.BE.BOM/byteorderFB\x18\x00ZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZbigPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1a\x005\x00tensor.uint8.BE.BOM/data/0FB1\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZ\xf7\xf20\x04\t\x8a!\xbev\xf4\xbe\x0e";\xbb\tPK\x07\x08\xa8\x94'
b'#\x08\x10\x00\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\'\x00tensor.ui'
b'nt8.BE.BOM/versionFB#\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9e'
b'gU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00'
b'\x00\x00\x00\xff\xb9!\x97\x99\x00\x00\x00\x99\x00\x00\x00\x1c\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.uint8.BE.BOM/data.pklPK'
b'\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00I\xe2\xfb\xd3\x03\x00\x00'
b'\x00\x03\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9'
b'\x00\x00\x00tensor.uint8.BE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\xa8\x94#\x08\x10\x00\x00\x00\x10\x00\x00\x00\x1a\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00S\x01\x00\x00tensor.uint8.BE.BOM/data/0'
b'PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00'
b'\x00\x02\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0'
b'\x01\x00\x00tensor.uint8.BE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e'
b'\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00'
b'\x00\x00\x00\x00\x00\x00&\x01\x00\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00'
b'\x00PK\x06\x07\x00\x00\x00\x00x\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05'
b'\x06\x00\x00\x00\x00\x04\x00\x04\x00&\x01\x00\x00R\x02\x00\x00\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
# 1-byte types are the same on BE and LE
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
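# --- Editor's illustrative sketch (not part of the original test) ---
# The assertions above reduce to one fact: a 1-byte payload decodes identically
# with or without a byteorder record. Assuming the zip-based checkpoint format
# whose member names appear in the byte blobs above, the byteorder record of a
# freshly saved file can be inspected directly:
#
# import io
# import zipfile
# import torch
#
# buf = io.BytesIO()
# torch.save(torch.randint(0, 256, [4, 4], dtype=torch.uint8), buf)
# buf.seek(0)
# with zipfile.ZipFile(buf) as zf:
#     for name in zf.namelist():
#         if name.endswith('byteorder'):
#             print(name, zf.read(name))  # b'little' or b'big'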
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_bool
|
def test_serialization_load_bom_data_bool(self):
# 1. Generated on an LE (little-endian) system using the following commands:
#
# import torch
#
# x = torch.randint(0, 2, [4, 4], dtype=torch.bool)
#
# torch.save(x, "tensor.bool.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.bool.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it was resaved on a BE (big-endian) system with the following commands:
#
# import torch
#
# x = torch.load('tensor.bool.LE.BOM.pt')
#
# torch.save(x, 'tensor.bool.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.bool.BE.BOM.pt')
#
# print(x)
#
# The following commands, plus a bit of manual work, were used to produce the Python bytes from the resulting files:
#
# import textwrap
#
# with open('filename', 'rb') as file:
#     data = file.read()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM in this context stands for Byte Order Mark.
#
data_le_no_bom = (b"PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x17\x00\x0b\x00tensor.bool.LE/data.pklFB\x07\x00ZZZZZZZ\x80"
b"\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc"
b"h\nBoolStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05"
b"QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08"
b")Rq\ttq\nRq\x0b.PK\x07\x08\x9a\xab='\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00"
b"\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x15\x00$\x00tensor.bool.LE/data/0FB \x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\x01"
b"\x00\x00\x01\x00\x01\x00\x00\x00\x00\x01\x00\x01\x00\x01\x00PK\x07\x08\x00Y04"
b"\x10\x00\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00,\x00tensor.bool.LE/ve"
b"rsionFB(\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00"
b"\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00"
b"\x00\x9a\xab='\x99\x00\x00\x00\x99\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.bool.LE/data.pklPK\x01\x02\x00\x00"
b"\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00Y04\x10\x00\x00\x00\x10\x00\x00\x00\x15"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.bo"
b"ol.LE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU"
b"\x02\x00\x00\x00\x02\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00`\x01\x00\x00tensor.bool.LE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00"
b"\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03"
b"\x00\x00\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00"
b"\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\x9e\x02\x00\x00\x00\x00\x00\x00\x01\x00"
b"\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xcc\x00\x00\x00\xd2\x01\x00\x00"
b"\x00\x00")
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.bool.LE.BOM/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nBoolStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\x9a\xab=\'\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1c\x00\x1d\x00tensor.bool.LE.BOM/byteorderFB\x19\x00ZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZlittlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x19\x003\x00tensor.bool.LE.BOM/data/0FB/\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZ\x01\x00\x00\x01\x00\x01\x00\x00\x00\x00\x01\x00\x01\x00\x01\x00'
b'PK\x07\x08\x00Y04\x10\x00\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x00(\x00'
b'tensor.bool.LE.BOM/versionFB$\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08'
b'\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\x9a\xab=\'\x99\x00\x00\x00\x99\x00\x00\x00\x1b\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.bool.LE.BOM/dat'
b'a.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x85=\xe3\x19\x06'
b'\x00\x00\x00\x06\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\xe9\x00\x00\x00tensor.bool.LE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\x00Y04\x10\x00\x00\x00\x10\x00\x00\x00\x19\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00V\x01\x00\x00tensor.bool.LE.BOM/data/0P'
b'K\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00'
b'\x02\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x01'
b'\x00\x00tensor.bool.LE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e'
b'\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00'
b'\x00\x00\x00\x00\x00\x00"\x01\x00\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00\x00'
b'PK\x06\x07\x00\x00\x00\x00t\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05'
b'\x06\x00\x00\x00\x00\x04\x00\x04\x00"\x01\x00\x00R\x02\x00\x00\x00\x00')
data_be_no_bom = (b"PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x17\x00\x0b\x00tensor.bool.BE/data.pklFB\x07\x00ZZZZZZZ\x80"
b"\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc"
b"h\nBoolStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05"
b"QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08"
b")Rq\ttq\nRq\x0b.PK\x07\x08\x9a\xab='\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00"
b"\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x15\x00$\x00tensor.bool.BE/data/0FB \x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\x01"
b"\x00\x00\x01\x00\x01\x00\x00\x00\x00\x01\x00\x01\x00\x01\x00PK\x07\x08\x00Y04"
b"\x10\x00\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00,\x00tensor.bool.BE/ve"
b"rsionFB(\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00"
b"\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00"
b"\x00\x9a\xab='\x99\x00\x00\x00\x99\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.bool.BE/data.pklPK\x01\x02\x00\x00"
b"\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00Y04\x10\x00\x00\x00\x10\x00\x00\x00\x15"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00tensor.bo"
b"ol.BE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU"
b"\x02\x00\x00\x00\x02\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00`\x01\x00\x00tensor.bool.BE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00"
b"\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03"
b"\x00\x00\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00"
b"\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\x9e\x02\x00\x00\x00\x00\x00\x00\x01\x00"
b"\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xcc\x00\x00\x00\xd2\x01\x00\x00"
b"\x00\x00")
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.bool.BE.BOM/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nBoolStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x10tq\x05'
b'QK\x00K\x04K\x04\x86q\x06K\x04K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
b')Rq\ttq\nRq\x0b.PK\x07\x08\x9a\xab=\'\x99\x00\x00\x00\x99\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1c\x00\x1d\x00tensor.bool.BE.BOM/byteorderFB\x19\x00ZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZbigPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x19\x006\x00tensor.bool.BE.BOM/data/0FB2\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZ\x01\x00\x00\x01\x00\x01\x00\x00\x00\x00\x01\x00\x01\x00\x01\x00'
b'PK\x07\x08\x00Y04\x10\x00\x00\x00\x10\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x00(\x00'
b'tensor.bool.BE.BOM/versionFB$\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08'
b'\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\x9a\xab=\'\x99\x00\x00\x00\x99\x00\x00\x00\x1b\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.bool.BE.BOM/dat'
b'a.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00I\xe2\xfb\xd3\x03'
b'\x00\x00\x00\x03\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\xe9\x00\x00\x00tensor.bool.BE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\x00Y04\x10\x00\x00\x00\x10\x00\x00\x00\x19\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00S\x01\x00\x00tensor.bool.BE.BOM/data/0P'
b'K\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00'
b'\x02\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x01'
b'\x00\x00tensor.bool.BE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e'
b'\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00'
b'\x00\x00\x00\x00\x00\x00"\x01\x00\x00\x00\x00\x00\x00R\x02\x00\x00\x00\x00\x00\x00'
b'PK\x06\x07\x00\x00\x00\x00t\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05'
b'\x06\x00\x00\x00\x00\x04\x00\x04\x00"\x01\x00\x00R\x02\x00\x00\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
# 1-byte types are the same on BE and LE
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
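# --- Editor's illustrative sketch (not part of the original test) ---
# bool, like uint8, is a 1-byte dtype, so byte order cannot affect it. A minimal
# stdlib illustration: a single byte decodes the same under both endianness codes.
#
# import struct
#
# raw = b'\x01'
# assert struct.unpack('<B', raw) == struct.unpack('>B', raw) == (1,)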
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_bfloat16
|
def test_serialization_load_bom_data_bfloat16(self):
# 1. Generated on an LE (little-endian) system using the following commands:
#
# import torch
#
# x = torch.randn(2, 2, dtype=torch.bfloat16)
#
# torch.save(x, "tensor.bfloat16.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.bfloat16.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it was resaved on a BE (big-endian) system with the following commands:
#
# import torch
#
# x = torch.load('tensor.bfloat16.LE.BOM.pt')
#
# torch.save(x, 'tensor.bfloat16.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.bfloat16.BE.BOM.pt')
#
# print(x)
#
# The following commands, plus a bit of manual work, were used to produce the Python bytes from the resulting files:
#
# import textwrap
#
# with open('filename', 'rb') as file:
#     data = file.read()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM in this context stands for Byte Order Mark.
#
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.bfloat16.LE/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nBFloat16Storage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq'
b'\x05QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq'
b'\x08)Rq\ttq\nRq\x0b.PK\x07\x08\x1f>\xd9\x7f\x9d\x00\x00\x00\x9d\x00\x00\x00PK\x03'
b'\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x19\x00\x1c\x00tensor.bfloat16.LE/data/0FB\x18\x00ZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZ\r@i\xber?\xbc\xbfPK\x07\x085\xd2\x8f\xc7\x08\x00\x00\x00\x08\x00\x00\x00'
b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x1a\x000\x00tensor.bfloat16.LE/versionFB,\x00ZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00'
b'\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x1f>\xd9\x7f\x9d\x00'
b'\x00\x00\x9d\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00tensor.bfloat16.LE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x005\xd2\x8f\xc7\x08\x00\x00\x00\x08\x00\x00\x00\x19\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xed\x00\x00\x00tensor.bfloat16.LE/'
b'data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00'
b'\x00\x00\x02\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'X\x01\x00\x00tensor.bfloat16.LE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00'
b'\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03'
b'\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00'
b'\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xaa\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00'
b'\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xd8\x00\x00\x00\xd2\x01\x00\x00'
b'\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1f\x00C\x00tensor.bfloat16.LE.BOM/data.pklFB?\x00ZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\x80\x02ctorch._utils\n_re'
b'build_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\nBFloat16Storage\nq\x02'
b'X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05QK\x00K\x02K\x02\x86'
b'q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)Rq\ttq\nRq\x0b.PK'
b'\x07\x08\x1f>\xd9\x7f\x9d\x00\x00\x00\x9d\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x15'
b'\x00tensor.bfloat16.LE.BOM/byteorderFB\x11\x00ZZZZZZZZZZZZZZZZZlittlePK\x07\x08\x85'
b'=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1d\x00/\x00tenso'
b'r.bfloat16.LE.BOM/data/0FB+\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\r@i\xbe'
b'r?\xbc\xbfPK\x07\x085\xd2\x8f\xc7\x08\x00\x00\x00\x08\x00\x00\x00PK\x03\x04\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x1e\x00,\x00tensor.bfloat16.LE.BOM/versionFB(\x00ZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02'
b'\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x1f>\xd9\x7f\x9d\x00\x00\x00\x9d'
b'\x00\x00\x00\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00tensor.bfloat16.LE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00'
b'\x00\x00\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00 \x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00-\x01\x00\x00tensor.bfloat16.LE.BOM/byteorderPK\x01'
b'\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x005\xd2\x8f\xc7\x08\x00\x00'
b'\x00\x08\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96'
b'\x01\x00\x00tensor.bfloat16.LE.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00'
b'\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1e\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x02\x00\x00tensor.bfloat16.LE.BOM/vers'
b'ionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x002\x01\x00'
b'\x00\x00\x00\x00\x00\x92\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xc4'
b'\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00'
b'\x04\x002\x01\x00\x00\x92\x02\x00\x00\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1b\x00\x07\x00tensor.bfloat16.BE/data.pklFB\x03\x00ZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nBFloat16Storage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq'
b'\x05QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq'
b'\x08)Rq\ttq\nRq\x0b.PK\x07\x08\x1f>\xd9\x7f\x9d\x00\x00\x00\x9d\x00\x00\x00PK\x03'
b'\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x19\x00\x1c\x00tensor.bfloat16.BE/data/0FB\x18\x00ZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZ@\r\xbei?r\xbf\xbcPK\x07\x08d\x02=\xc7\x08\x00\x00\x00\x08\x00\x00\x00PK'
b'\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x1a\x000\x00tensor.bfloat16.BE/versionFB,\x00ZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00'
b'PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x1f>\xd9\x7f\x9d\x00'
b'\x00\x00\x9d\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00tensor.bfloat16.BE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00d\x02=\xc7\x08\x00\x00\x00\x08\x00\x00\x00\x19\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\xed\x00\x00\x00tensor.bfloat16.BE/data/0'
b'PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00'
b'\x00\x02\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\x01'
b'\x00\x00tensor.bfloat16.BE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03'
b'-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00'
b'\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00'
b'\x00PK\x06\x07\x00\x00\x00\x00\xaa\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00'
b'PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xd8\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1f\x00C\x00tensor.bfloat16.BE.BOM/data.pklFB?\x00ZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\x80\x02ctorch._utils\n_re'
b'build_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\nBFloat16Storage\nq\x02'
b'X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05QK\x00K\x02K\x02\x86'
b'q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08)Rq\ttq\nRq\x0b.PK'
b'\x07\x08\x1f>\xd9\x7f\x9d\x00\x00\x00\x9d\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x15'
b'\x00tensor.bfloat16.BE.BOM/byteorderFB\x11\x00ZZZZZZZZZZZZZZZZZbigPK\x07\x08I\xe2'
b'\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1d\x002\x00tensor.b'
b'float16.BE.BOM/data/0FB.\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ@\r\xbe'
b'i?r\xbf\xbcPK\x07\x08d\x02=\xc7\x08\x00\x00\x00\x08\x00\x00\x00PK\x03\x04\x00\x00'
b'\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x1e\x00,\x00tensor.bfloat16.BE.BOM/versionFB(\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00'
b'\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x1f>\xd9\x7f\x9d\x00\x00\x00\x9d\x00'
b'\x00\x00\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'tensor.bfloat16.BE.BOM/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00'
b'\x00\x00I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00 \x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00-\x01\x00\x00tensor.bfloat16.BE.BOM/byteorderPK\x01'
b'\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00d\x02=\xc7\x08\x00\x00\x00\x08'
b'\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x93\x01\x00'
b'\x00tensor.bfloat16.BE.BOM/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00'
b'\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x18\x02\x00\x00tensor.bfloat16.BE.BOM/versionPK\x06'
b'\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x002\x01\x00\x00\x00'
b'\x00\x00\x00\x92\x02\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xc4\x03'
b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x04\x00\x04\x00'
b'2\x01\x00\x00\x92\x02\x00\x00\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
if sys.byteorder == 'little':
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_be_bom))
else:
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
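# --- Editor's illustrative sketch (not part of the original test) ---
# bfloat16 is a 2-byte dtype, which is why the expectations above now branch on
# sys.byteorder. struct has no bfloat16 format code, so IEEE float16 ('e') stands
# in to show the underlying effect: the same two bytes decode to different values.
#
# import struct
#
# raw = b'\x3c\x00'
# (little,) = struct.unpack('<e', raw)  # read as 0x003c -> tiny subnormal
# (big,) = struct.unpack('>e', raw)     # read as 0x3c00 -> exactly 1.0
# assert little != big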
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
fn
|
def fn(t):
return converter.from_real_tensor(mode, t) if materialize_fake else t
sd = {'t_v2': fn(t_v2), 't_v3': fn(t_v3), 'tt': fn(tt)}
sd_expected = {
't_v2': torch.zeros(2, 3, device=t_device),
't_v3': torch.zeros(2, 3, dtype=torch.complex32, device=t_device),
'tt': TwoTensor(torch.zeros(2, device=t_device), torch.zeros(2, device=t_device)),
}
if not materialize_fake:
sd['st'] = st
sd_expected['st'] = torch.sparse_coo_tensor(torch.zeros(2, 3), torch.zeros(3), (2, 4))
with BytesIOContext() as f:
with skip_data(materialize_fake_tensors=materialize_fake):
torch.save(sd, f)
f.seek(0)
with safe_globals([TwoTensor]):
sd_loaded = torch.load(f, weights_only=True)
self.assertEqual(sd_loaded, sd_expected, exact_device=True)
self.assertFalse(getattr(torch.serialization._serialization_tls, "materialize_fake_tensors", False))
self.assertFalse(getattr(torch.serialization._serialization_tls, "skip_data", False))
# Test that, without materialize_fake_tensors, the behavior for fake tensors is not altered by the ctx
if not materialize_fake:
ft = converter.from_real_tensor(mode, torch.randn(2, device=t_device))
with self.assertRaisesRegex(
AttributeError,
"Can't (get|pickle) local object 'WeakValueDictionary.__init__.<locals>.remove'"
):
with skip_data(), BytesIOContext() as f:
torch.save(ft, f)
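# --- Editor's illustrative sketch (not part of the original test) ---
# Minimal skip_data round trip, assuming only the behavior the test itself
# asserts: metadata (shape, dtype, device) is written, storage bytes are not,
# so tensors load back zero-filled.
#
# import io
# import torch
# from torch.serialization import skip_data
#
# buf = io.BytesIO()
# with skip_data():
#     torch.save({'w': torch.randn(2, 3)}, buf)
# buf.seek(0)
# loaded = torch.load(buf, weights_only=True)
# assert torch.equal(loaded['w'], torch.zeros(2, 3))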
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_skip_data_serialization_preserves_views
|
def test_skip_data_serialization_preserves_views(self, materialize_fake):
ctx = FakeTensorMode if materialize_fake else contextlib.nullcontext
with ctx():
t = torch.randn(2, 3)
t_view = t.view(-1)
t_slice = t[1]
sd = {'t': t, 't_view': t_view, 't_slice': t_slice}
with BytesIOContext() as f:
with skip_data(materialize_fake_tensors=materialize_fake):
torch.save(sd, f)
f.seek(0)
sd_loaded = torch.load(f, weights_only=True)
self.assertTrue(id(sd_loaded['t_view'].untyped_storage()) == id(sd_loaded['t'].untyped_storage()))
self.assertTrue(id(sd_loaded['t_slice'].untyped_storage()) == id(sd_loaded['t'].untyped_storage()))
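# --- Editor's illustrative sketch (not part of the original test) ---
# The invariant under test: a view and a slice alias the base tensor's untyped
# storage instead of copying it, and that aliasing must survive the round trip.
#
# import torch
#
# t = torch.randn(2, 3)
# t_view = t.view(-1)
# t_slice = t[1]
# assert t_view.untyped_storage().data_ptr() == t.untyped_storage().data_ptr()
# assert t_slice.untyped_storage().data_ptr() == t.untyped_storage().data_ptr()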
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
_save_load
|
def _save_load(t):
with BytesIOContext() as f:
with skip_data():
torch.save(t, f)
f.seek(0)
torch.load(f, weights_only=True)
nt = torch.nested.nested_tensor([torch.randn(2), torch.randn(3)])
t = torch.randn(2, 3, device="meta")
with self.assertRaisesRegex(RuntimeError, "Cannot serialize nested tensor under skip_data context manager"):
_save_load(nt)
with self.assertWarnsRegex(UserWarning, "meta device under skip_data context manager is a no-op"):
_save_load(t)
with self.assertRaisesRegex(RuntimeError, "Please call torch.load outside the skip_data context manager"):
with skip_data(), BytesIOContext() as f:
torch.save(torch.randn(2, 3), f)
f.seek(0)
torch.load(f, weights_only=True)
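# --- Editor's illustrative sketch (not part of the original test) ---
# The last case above in isolation: torch.load is rejected while the skip_data
# context manager is still active.
#
# import io
# import torch
# from torch.serialization import skip_data
#
# buf = io.BytesIO()
# with skip_data():
#     torch.save(torch.randn(2, 3), buf)
#     buf.seek(0)
#     try:
#         torch.load(buf, weights_only=True)
#     except RuntimeError as e:
#         print(e)  # asks for torch.load outside the skip_data context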
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_tensor_subclass_wrapper_serialization
|
def test_tensor_subclass_wrapper_serialization(self):
wrapped_tensor = torch.rand(2)
my_tensor = TestWrapperSubclass(wrapped_tensor)
foo_val = "bar"
my_tensor.foo = foo_val
self.assertEqual(my_tensor.foo, foo_val)
with BytesIOContext() as f:
torch.save(my_tensor, f)
f.seek(0)
new_tensor = torch.load(f)
self.assertIsInstance(new_tensor, TestWrapperSubclass)
self.assertEqual(new_tensor.elem, my_tensor.elem)
self.assertEqual(new_tensor.foo, foo_val)
|
def test_tensor_subclass_wrapper_serialization(self):
wrapped_tensor = torch.rand(2)
my_tensor = TestWrapperSubclass(wrapped_tensor)
foo_val = "bar"
my_tensor.foo = foo_val
self.assertEqual(my_tensor.foo, foo_val)
with BytesIOContext() as f:
torch.save(my_tensor, f)
f.seek(0)
with safe_globals([TestWrapperSubclass]):
new_tensor = torch.load(f)
self.assertIsInstance(new_tensor, TestWrapperSubclass)
self.assertEqual(new_tensor.elem, my_tensor.elem)
self.assertEqual(new_tensor.foo, foo_val)
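# --- Editor's illustrative sketch (not part of the original test) ---
# The key change in this modified test is wrapping the load in safe_globals, the
# allow-list for rebuilding tensor subclasses under the weights_only loader. A
# self-contained version with a hypothetical subclass (MyTensor is illustrative
# only, not from the test file):
#
# import io
# import torch
# from torch.serialization import safe_globals
#
# class MyTensor(torch.Tensor):
#     pass
#
# buf = io.BytesIO()
# torch.save(torch.rand(2).as_subclass(MyTensor), buf)
# buf.seek(0)
# with safe_globals([MyTensor]):
#     loaded = torch.load(buf, weights_only=True)
# assert isinstance(loaded, MyTensor)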
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
import importlib.util
class TestSubclassSerialization(TestCase):
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSubclassSerialization(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_serialization.py
|
test_serialization_load_bom_data_cdouble
|
def test_serialization_load_bom_data_cdouble(self):
# 1. Generated on an LE (little-endian) system using the following commands:
#
# import torch
#
# x = torch.randn(2, 2, dtype=torch.cdouble)
#
# torch.save(x, "tensor.cdouble.LE.pt", _disable_byteorder_record=True)
# torch.save(x, "tensor.cdouble.LE.BOM.pt")
#
# print(x)
#
# 2. After that, it was resaved on a BE (big-endian) system with the following commands:
#
# import torch
#
# x = torch.load('tensor.cdouble.LE.BOM.pt')
#
# torch.save(x, 'tensor.cdouble.BE.pt', _disable_byteorder_record=True)
# torch.save(x, 'tensor.cdouble.BE.BOM.pt')
#
# print(x)
#
# The following commands, plus a bit of manual work, were used to produce the Python bytes from the resulting files:
#
# import textwrap
#
# with open('filename', 'rb') as file:
#     data = file.read()
# print("\n".join(textwrap.wrap(str(data), 80)))
#
# BOM in this context stands for Byte Order Mark.
#
data_le_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1a\x00\x08\x00tensor.cdouble.LE/data.pklFB\x04\x00ZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nComplexDoubleStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04'
b'tq\x05QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDi'
b'ct\nq\x08)Rq\ttq\nRq\x0b.PK\x07\x08(W{\xca\xa2\x00\x00\x00\xa2\x00\x00\x00PK\x03'
b'\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x18\x00\x18\x00tensor.cdouble.LE/data/0FB\x14\x00ZZZZZZZZZZZZZZZZZZ'
b'ZZ\xd1/\x84\xd8,\x00\xcd\xbf|L\xcf\xd0O\xee\xd7\xbfb\xb6<\xb4\xe2_\xec?v+\x86\xd9'
b'\xca\x0e\xf8?i#\xbb\xfcU\x1b\xe0\xbf\x984\xcd\x02q\x8a\xe9?\xc1_\xd7R\xe3\xfb\xe3'
b'\xbf\xcf\xce>\xcd\xa2\x9f\xe8?PK\x07\x08\x1d\xed\xed\xa0@\x00\x00\x00@\x00\x00'
b'\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x19\x009\x00tensor.cdouble.LE/versionFB5\x00ZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02'
b'\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00(W{\xca'
b'\xa2\x00\x00\x00\xa2\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00tensor.cdouble.LE/data.pklPK\x01\x02\x00\x00\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x1d\xed\xed\xa0@\x00\x00\x00@\x00\x00\x00\x18\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00\x00\x00tensor.cdouble.LE/'
b'data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00'
b'\x00\x00\x02\x00\x00\x00\x19\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x90\x01\x00\x00tensor.cdouble.LE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00'
b'\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03'
b'\x00\x00\x00\x00\x00\x00\x00\xd5\x00\x00\x00\x00\x00\x00\x00\x12\x02\x00\x00\x00'
b'\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xe7\x02\x00\x00\x00\x00\x00\x00\x01\x00'
b'\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xd5\x00\x00\x00\x12\x02\x00\x00'
b'\x00\x00')
data_le_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1e\x00\x04\x00tensor.cdouble.LE.BOM/data.pklFB\x00\x00\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nComplexDoubleStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04'
b'tq\x05QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDi'
b'ct\nq\x08)Rq\ttq\nRq\x0b.PK\x07\x08(W{\xca\xa2\x00\x00\x00\xa2\x00\x00\x00PK\x03'
b'\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x1f\x00\x11\x00tensor.cdouble.LE.BOM/byteorderFB\r\x00ZZZZZZZZZZZZZ'
b'littlePK\x07\x08\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c'
b'\x000\x00tensor.cdouble.LE.BOM/data/0FB,\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZ\xd1/\x84\xd8,\x00\xcd\xbf|L\xcf\xd0O\xee\xd7\xbfb\xb6<\xb4\xe2_\xec?'
b'v+\x86\xd9\xca\x0e\xf8?i#\xbb\xfcU\x1b\xe0\xbf\x984\xcd\x02q\x8a\xe9?\xc1_\xd7R\xe3'
b'\xfb\xe3\xbf\xcf\xce>\xcd\xa2\x9f\xe8?PK\x07\x08\x1d\xed\xed\xa0@\x00\x00\x00'
b'@\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x1d\x005\x00tensor.cdouble.LE.BOM/versionFB1\x00'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00'
b'\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00'
b'(W{\xca\xa2\x00\x00\x00\xa2\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00tensor.cdouble.LE.BOM/data.pklPK\x01\x02\x00\x00'
b'\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00'
b'\x00\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00\x00\x00te'
b'nsor.cdouble.LE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\x1d\xed\xed\xa0@\x00\x00\x00@\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00V\x01\x00\x00tensor.cdouble.LE.BOM/data/0PK\x01\x02\x00'
b'\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00'
b'\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x02\x00\x00te'
b'nsor.cdouble.LE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00'
b'\x00\x00\x00.\x01\x00\x00\x00\x00\x00\x00\x92\x02\x00\x00\x00\x00\x00\x00PK\x06'
b'\x07\x00\x00\x00\x00\xc0\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06'
b'\x00\x00\x00\x00\x04\x00\x04\x00.\x01\x00\x00\x92\x02\x00\x00\x00\x00')
data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1a\x00\x08\x00tensor.cdouble.BE/data.pklFB\x04\x00ZZZZ\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nComplexDoubleStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04'
b'tq\x05QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDi'
b'ct\nq\x08)Rq\ttq\nRq\x0b.PK\x07\x08(W{\xca\xa2\x00\x00\x00\xa2\x00\x00\x00PK\x03'
b'\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x18\x00\x18\x00tensor.cdouble.BE/data/0FB\x14\x00ZZZZZZZZZZZZZZZZZZ'
b'ZZ\xbf\xcd\x00,\xd8\x84/\xd1\xbf\xd7\xeeO\xd0\xcfL|?\xec_\xe2\xb4<\xb6b?\xf8\x0e'
b'\xca\xd9\x86+v\xbf\xe0\x1bU\xfc\xbb#i?\xe9\x8aq\x02\xcd4\x98\xbf\xe3\xfb\xe3R\xd7'
b'_\xc1?\xe8\x9f\xa2\xcd>\xce\xcfPK\x07\x08\x91\xbey\x14@\x00\x00\x00@\x00\x00\x00'
b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x19\x009\x00tensor.cdouble.BE/versionFB5\x00ZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00\x00\x02'
b'\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00(W{\xca\xa2'
b'\x00\x00\x00\xa2\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00tensor.cdouble.BE/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08'
b'\x00\x00\x00\x00\x00\x00\x91\xbey\x14@\x00\x00\x00@\x00\x00\x00\x18\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00\x00\x00tensor.cdouble.BE/data/0'
b'PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00'
b'\x00\x02\x00\x00\x00\x19\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90'
b'\x01\x00\x00tensor.cdouble.BE/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e'
b'\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00'
b'\x00\x00\x00\x00\x00\x00\xd5\x00\x00\x00\x00\x00\x00\x00\x12\x02\x00\x00\x00\x00'
b'\x00\x00PK\x06\x07\x00\x00\x00\x00\xe7\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00'
b'PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00\xd5\x00\x00\x00\x12\x02\x00\x00\x00'
b'\x00')
data_be_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x1e\x00\x04\x00tensor.cdouble.BE.BOM/data.pklFB\x00\x00\x80'
b'\x02ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorc'
b'h\nComplexDoubleStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04'
b'tq\x05QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDi'
b'ct\nq\x08)Rq\ttq\nRq\x0b.PK\x07\x08(W{\xca\xa2\x00\x00\x00\xa2\x00\x00\x00PK\x03'
b'\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x1f\x00\x11\x00tensor.cdouble.BE.BOM/byteorderFB\r\x00ZZZZZZZZZZZZZ'
b'bigPK\x07\x08I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00PK\x03\x04\x00\x00\x08'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c'
b'\x003\x00tensor.cdouble.BE.BOM/data/0FB/\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
b'ZZZZZZZZZZZ\xbf\xcd\x00,\xd8\x84/\xd1\xbf\xd7\xeeO\xd0\xcfL|?\xec_\xe2\xb4<\xb6b'
b'?\xf8\x0e\xca\xd9\x86+v\xbf\xe0\x1bU\xfc\xbb#i?\xe9\x8aq\x02\xcd4\x98\xbf\xe3\xfb'
b'\xe3R\xd7_\xc1?\xe8\x9f\xa2\xcd>\xce\xcfPK\x07\x08\x91\xbey\x14@\x00\x00\x00@\x00'
b'\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x1d\x005\x00tensor.cdouble.BE.BOM/versionFB1\x00ZZZ'
b'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00'
b'\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00('
b'W{\xca\xa2\x00\x00\x00\xa2\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00tensor.cdouble.BE.BOM/data.pklPK\x01\x02\x00\x00\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00I\xe2\xfb\xd3\x03\x00\x00\x00\x03\x00\x00\x00'
b'\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00\x00\x00tenso'
b'r.cdouble.BE.BOM/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00'
b'\x00\x91\xbey\x14@\x00\x00\x00@\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00S\x01\x00\x00tensor.cdouble.BE.BOM/data/0PK\x01\x02\x00\x00\x00'
b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00'
b'\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x02\x00\x00tensor.c'
b'double.BE.BOM/versionPK\x06\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00'
b'\x00\x00.\x01\x00\x00\x00\x00\x00\x00\x92\x02\x00\x00\x00\x00\x00\x00PK\x06\x07'
b'\x00\x00\x00\x00\xc0\x03\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00'
b'\x00\x00\x04\x00\x04\x00.\x01\x00\x00\x92\x02\x00\x00\x00\x00')
current_load_endian = get_default_load_endianness()
buf_le_no_bom = io.BytesIO(data_le_no_bom)
buf_le_bom = io.BytesIO(data_le_bom)
buf_be_no_bom = io.BytesIO(data_be_no_bom)
buf_be_bom = io.BytesIO(data_be_bom)
try:
set_default_load_endianness(LoadEndianness.NATIVE)
tensor_le_no_bom = torch.load(buf_le_no_bom)
tensor_be_no_bom = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
tensor_le_bom = torch.load(buf_le_bom)
tensor_be_bom = torch.load(buf_be_bom)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.LITTLE)
tensor_le_no_bom_little = torch.load(buf_le_no_bom)
tensor_be_no_bom_little = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
buf_le_no_bom.seek(0)
buf_be_no_bom.seek(0)
try:
set_default_load_endianness(LoadEndianness.BIG)
tensor_le_no_bom_big = torch.load(buf_le_no_bom)
tensor_be_no_bom_big = torch.load(buf_be_no_bom)
finally:
set_default_load_endianness(current_load_endian)
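        # Tensors saved with a byteorder record ("BOM" files above) decode the
        # same under any default; tensors saved without one follow the
        # configured default load endianness.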
self.assertTrue(torch.equal(tensor_le_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_no_bom))
self.assertTrue(torch.equal(tensor_le_no_bom_little, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom_little, tensor_be_bom))
self.assertFalse(torch.equal(tensor_le_no_bom_big, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom_big, tensor_be_bom))
        if sys.byteorder == 'little':
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_be_no_bom, tensor_be_bom))
else:
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_le_bom))
self.assertFalse(torch.equal(tensor_le_no_bom, tensor_be_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
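The blobs above exercise torch.serialization's byte-order handling: checkpoints carrying a byteorder record decode identically under any default, while record-free (legacy) checkpoints follow the configured default. A minimal sketch of the save/override/restore pattern the test uses (tensor values are illustrative; a checkpoint written by a current torch.save carries the record, so the override below only changes the result for record-free files):

import io
import torch
from torch.serialization import (
    LoadEndianness,
    get_default_load_endianness,
    set_default_load_endianness,
)

buf = io.BytesIO()
torch.save(torch.arange(4, dtype=torch.float64), buf)

prev = get_default_load_endianness()
try:
    # Reinterpret checkpoints that lack a byteorder record as big-endian.
    set_default_load_endianness(LoadEndianness.BIG)
    buf.seek(0)
    t = torch.load(buf)
finally:
    # The default is process-wide state; always restore it.
    set_default_load_endianness(prev)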
torch
|
test/test_serialization.py
|
forward
|
def forward(self, input):
return self.fc2(self.fc1(input))
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class DummyModel(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_serialization.py
|
test_serialization_mmap_loading_with_map_location
|
def test_serialization_mmap_loading_with_map_location(self):
class DummyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = torch.nn.Linear(3, 1024)
self.fc2 = torch.nn.Linear(1024, 5)
def forward(self, input):
return self.fc2(self.fc1(input))
        # make sure mmap-ing a checkpoint whose tensors' location tags are not
        # CPU does not crash: the zipfile is first mmap-ed on CPU, each storage
        # is extracted via overall_storage[start_offset:end_offset], and only
        # then does _{device}_deserialize run and move the storage to its device
with TemporaryFileName() as f:
with torch.device('cuda'):
m = DummyModel()
state_dict = m.state_dict()
torch.save(state_dict, f)
result = torch.load(f, mmap=True)
for v in result.values():
self.assertTrue(v.is_cuda)
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSerialization(TestCase, SerializationMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
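The mmap test above combines torch.load's mmap=True path with a CUDA device context. A minimal CPU-only sketch of the flag itself, assuming nothing beyond a plain state-dict-like mapping:

import torch
from torch.testing._internal.common_utils import TemporaryFileName

sd = {"w": torch.randn(4, 3)}
with TemporaryFileName() as f:
    torch.save(sd, f)
    # mmap=True maps the zip archive instead of reading it eagerly, so tensor
    # storages alias the mapped file until they are first written to.
    loaded = torch.load(f, mmap=True)
    assert torch.equal(loaded["w"], sd["w"])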
torch
|
test/test_serialization.py
|
forward
|
def forward(self, input):
return self.fc2(self.fc1(input))
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class DummyModel(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_shape_ops.py
|
test_nonzero
|
def test_nonzero(self, device, dtype):
shapes = [
torch.Size((12,)),
torch.Size((12, 1)),
torch.Size((1, 12)),
torch.Size((6, 2)),
torch.Size((3, 2, 2)),
torch.Size((5, 5, 5)),
]
def gen_nontrivial_input(shape, dtype, device):
if dtype != torch.bfloat16:
return torch.randint(2, shape, device=device, dtype=dtype)
else:
            # torch.randint does not support bfloat16 on Windows
return torch.randint(2, shape, device=device, dtype=torch.float).to(dtype)
for shape in shapes:
tensor = gen_nontrivial_input(shape, dtype, device)
dst1 = torch.nonzero(tensor, as_tuple=False)
dst2 = tensor.nonzero(as_tuple=False)
dst3 = torch.empty([], dtype=torch.long, device=device)
torch.nonzero(tensor, out=dst3)
if self.device_type != 'xla':
# xla does not raise runtime error
self.assertRaisesRegex(
RuntimeError,
"scalar type Long",
lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.float, device=device))
)
if self.device_type == 'cuda':
self.assertRaisesRegex(
RuntimeError,
"on the same device",
lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.long))
)
np_array = tensor.cpu().numpy() if dtype != torch.bfloat16 else tensor.float().cpu().numpy()
np_result = torch.from_numpy(np.stack(np_array.nonzero())).t()
self.assertEqual(dst1.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst2.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst3.cpu(), np_result, atol=0, rtol=0)
tup1 = torch.nonzero(tensor, as_tuple=True)
tup2 = tensor.nonzero(as_tuple=True)
tup1 = torch.stack(tup1).t().cpu()
tup2 = torch.stack(tup2).t().cpu()
self.assertEqual(tup1, np_result, atol=0, rtol=0)
self.assertEqual(tup2, np_result, atol=0, rtol=0)
|
def test_nonzero(self, device, dtype):
shapes = [
torch.Size((12,)),
torch.Size((12, 1)),
torch.Size((1, 12)),
torch.Size((6, 2)),
torch.Size((3, 2, 2)),
torch.Size((5, 5, 5)),
]
def gen_nontrivial_input(shape, dtype, device):
if dtype != torch.bfloat16:
return torch.randint(2, shape, device=device, dtype=dtype)
else:
                # torch.randint does not support bfloat16 on Windows
return torch.randint(2, shape, device=device, dtype=torch.float).to(
dtype
)
for shape in shapes:
tensor = gen_nontrivial_input(shape, dtype, device)
dst1 = torch.nonzero(tensor, as_tuple=False)
dst2 = tensor.nonzero(as_tuple=False)
dst3 = torch.empty([], dtype=torch.long, device=device)
torch.nonzero(tensor, out=dst3)
if self.device_type != "xla":
# xla does not raise runtime error
self.assertRaisesRegex(
RuntimeError,
"scalar type Long",
lambda: torch.nonzero(
tensor, out=torch.empty([], dtype=torch.float, device=device)
),
)
if (
self.device_type == "cuda"
or self.device_type == TEST_PRIVATEUSE1_DEVICE_TYPE
):
self.assertRaisesRegex(
RuntimeError,
"on the same device",
lambda: torch.nonzero(
tensor, out=torch.empty([], dtype=torch.long)
),
)
np_array = (
tensor.cpu().numpy()
if dtype != torch.bfloat16
else tensor.float().cpu().numpy()
)
np_result = torch.from_numpy(np.stack(np_array.nonzero())).t()
self.assertEqual(dst1.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst2.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst3.cpu(), np_result, atol=0, rtol=0)
tup1 = torch.nonzero(tensor, as_tuple=True)
tup2 = tensor.nonzero(as_tuple=True)
tup1 = torch.stack(tup1).t().cpu()
tup2 = torch.stack(tup2).t().cpu()
self.assertEqual(tup1, np_result, atol=0, rtol=0)
self.assertEqual(tup2, np_result, atol=0, rtol=0)
|
import torch
import numpy as np
from itertools import product, combinations, permutations, chain
from functools import partial
import random
import warnings
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfTorchDynamo, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyNativeDeviceTypes,
dtypesIfCUDA, largeTensorTest)
from torch.testing._internal.common_dtype import all_types_and_complex_and, all_types, all_types_and
class TestShapeOps(TestCase):
|
import random
import unittest
import warnings
from functools import partial
from itertools import chain, combinations, permutations, product
import numpy as np
import torch
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCUDA,
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
)
from torch.testing._internal.common_dtype import (
all_types,
all_types_and,
all_types_and_complex_and,
)
from torch.testing._internal.common_utils import (
IS_JETSON,
run_tests,
skipIfTorchDynamo,
TEST_PRIVATEUSE1_DEVICE_TYPE,
TestCase,
torch_to_numpy_dtype_dict,
)
class TestShapeOps(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
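Both versions of the test validate torch.nonzero against NumPy. A standalone sketch of the same equivalence (input values are illustrative):

import numpy as np
import torch

t = torch.tensor([[0, 1], [2, 0]])
idx = torch.nonzero(t, as_tuple=False)        # shape (N, ndim) index matrix
rows, cols = torch.nonzero(t, as_tuple=True)  # one 1-D index tensor per dim
np_idx = torch.from_numpy(np.stack(t.numpy().nonzero())).t()
assert torch.equal(idx, np_idx)
assert torch.equal(torch.stack((rows, cols)).t(), np_idx)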
torch
|
test/test_shape_ops.py
|
gen_nontrivial_input
|
def gen_nontrivial_input(shape, dtype, device):
if dtype != torch.bfloat16:
return torch.randint(2, shape, device=device, dtype=dtype)
else:
        # torch.randint does not support bfloat16 on Windows
return torch.randint(2, shape, device=device, dtype=torch.float).to(dtype)
for shape in shapes:
tensor = gen_nontrivial_input(shape, dtype, device)
dst1 = torch.nonzero(tensor, as_tuple=False)
dst2 = tensor.nonzero(as_tuple=False)
dst3 = torch.empty([], dtype=torch.long, device=device)
torch.nonzero(tensor, out=dst3)
if self.device_type != 'xla':
# xla does not raise runtime error
self.assertRaisesRegex(
RuntimeError,
"scalar type Long",
lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.float, device=device))
)
if self.device_type == 'cuda':
self.assertRaisesRegex(
RuntimeError,
"on the same device",
lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.long))
)
np_array = tensor.cpu().numpy() if dtype != torch.bfloat16 else tensor.float().cpu().numpy()
np_result = torch.from_numpy(np.stack(np_array.nonzero())).t()
self.assertEqual(dst1.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst2.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst3.cpu(), np_result, atol=0, rtol=0)
tup1 = torch.nonzero(tensor, as_tuple=True)
tup2 = tensor.nonzero(as_tuple=True)
tup1 = torch.stack(tup1).t().cpu()
tup2 = torch.stack(tup2).t().cpu()
self.assertEqual(tup1, np_result, atol=0, rtol=0)
self.assertEqual(tup2, np_result, atol=0, rtol=0)
|
def gen_nontrivial_input(shape, dtype, device):
if dtype != torch.bfloat16:
return torch.randint(2, shape, device=device, dtype=dtype)
else:
        # torch.randint does not support bfloat16 on Windows
return torch.randint(2, shape, device=device, dtype=torch.float).to(
dtype
)
for shape in shapes:
tensor = gen_nontrivial_input(shape, dtype, device)
dst1 = torch.nonzero(tensor, as_tuple=False)
dst2 = tensor.nonzero(as_tuple=False)
dst3 = torch.empty([], dtype=torch.long, device=device)
torch.nonzero(tensor, out=dst3)
if self.device_type != "xla":
# xla does not raise runtime error
self.assertRaisesRegex(
RuntimeError,
"scalar type Long",
lambda: torch.nonzero(
tensor, out=torch.empty([], dtype=torch.float, device=device)
),
)
if (
self.device_type == "cuda"
or self.device_type == TEST_PRIVATEUSE1_DEVICE_TYPE
):
self.assertRaisesRegex(
RuntimeError,
"on the same device",
lambda: torch.nonzero(
tensor, out=torch.empty([], dtype=torch.long)
),
)
np_array = (
tensor.cpu().numpy()
if dtype != torch.bfloat16
else tensor.float().cpu().numpy()
)
np_result = torch.from_numpy(np.stack(np_array.nonzero())).t()
self.assertEqual(dst1.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst2.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst3.cpu(), np_result, atol=0, rtol=0)
tup1 = torch.nonzero(tensor, as_tuple=True)
tup2 = tensor.nonzero(as_tuple=True)
tup1 = torch.stack(tup1).t().cpu()
tup2 = torch.stack(tup2).t().cpu()
self.assertEqual(tup1, np_result, atol=0, rtol=0)
self.assertEqual(tup2, np_result, atol=0, rtol=0)
|
import torch
import numpy as np
from itertools import product, combinations, permutations, chain
from functools import partial
import random
import warnings
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfTorchDynamo, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyNativeDeviceTypes,
dtypesIfCUDA, largeTensorTest)
from torch.testing._internal.common_dtype import all_types_and_complex_and, all_types, all_types_and
|
import random
import unittest
import warnings
from functools import partial
from itertools import chain, combinations, permutations, product
import numpy as np
import torch
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCUDA,
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
)
from torch.testing._internal.common_dtype import (
all_types,
all_types_and,
all_types_and_complex_and,
)
from torch.testing._internal.common_utils import (
IS_JETSON,
run_tests,
skipIfTorchDynamo,
TEST_PRIVATEUSE1_DEVICE_TYPE,
TestCase,
torch_to_numpy_dtype_dict,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
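gen_nontrivial_input works around torch.randint lacking bfloat16 support by sampling integer-valued floats and casting. The same workaround in isolation (the helper name is illustrative):

import torch

def randint_bfloat16(high, shape):
    # Small integers such as 0 and 1 are exactly representable in bfloat16,
    # so sampling in float and casting loses no precision.
    return torch.randint(high, shape, dtype=torch.float).to(torch.bfloat16)

x = randint_bfloat16(2, (3, 3))
assert x.dtype == torch.bfloat16 and bool(((x == 0) | (x == 1)).all())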
torch
|
test/test_sort_and_select.py
|
assertIsOrdered
|
def assertIsOrdered(self, order, x, mxx, ixx, task):
SIZE = x.size(1)
if order == 'descending':
def check_order(a, b):
# `a != a` because we put NaNs
# at the end of ascending sorted lists,
# and the beginning of descending ones.
return ((a != a) | (a >= b)).all().item()
elif order == 'ascending':
def check_order(a, b):
# see above
return ((b != b) | (a <= b)).all().item()
else:
error('unknown order "{}", must be "ascending" or "descending"'.format(order))
are_ordered = True
for k in range(1, SIZE):
self.assertTrue(check_order(mxx[:, k - 1], mxx[:, k]),
'torch.sort ({}) values unordered for {}'.format(order, task))
seen = set()
indicesCorrect = True
size0 = x.size(0)
size = x.size(x.dim() - 1)
x = x.tolist()
mxx = mxx.tolist()
ixx = ixx.tolist()
for k in range(size0):
seen.clear()
for j in range(size):
self.assertEqual(x[k][ixx[k][j]], mxx[k][j],
msg='torch.sort ({}) indices wrong for {}'.format(order, task))
seen.add(ixx[k][j])
self.assertEqual(len(seen), size)
|
def assertIsOrdered(self, order, x, mxx, ixx, task):
SIZE = x.size(1)
if order == "descending":
def check_order(a, b):
# `a != a` because we put NaNs
# at the end of ascending sorted lists,
# and the beginning of descending ones.
return ((a != a) | (a >= b)).all().item()
elif order == "ascending":
def check_order(a, b):
# see above
return ((b != b) | (a <= b)).all().item()
else:
error( # noqa: F821
f'unknown order "{order}", must be "ascending" or "descending"'
)
are_ordered = True
for k in range(1, SIZE):
self.assertTrue(
check_order(mxx[:, k - 1], mxx[:, k]),
f"torch.sort ({order}) values unordered for {task}",
)
seen = set()
indicesCorrect = True
size0 = x.size(0)
size = x.size(x.dim() - 1)
x = x.tolist()
mxx = mxx.tolist()
ixx = ixx.tolist()
for k in range(size0):
seen.clear()
for j in range(size):
self.assertEqual(
x[k][ixx[k][j]],
mxx[k][j],
msg=f"torch.sort ({order}) indices wrong for {task}",
)
seen.add(ixx[k][j])
self.assertEqual(len(seen), size)
|
import torch
import numpy as np
import random
from torch import nan
from itertools import permutations, product
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import all_types, all_types_and, floating_types_and
from torch.testing._internal.common_utils import \
(TestCase, run_tests, slowTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, onlyNativeDeviceTypes,
onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
SIZE = 100
class TestSortAndSelect(TestCase):
from itertools import chain, combinations
|
import random
from itertools import permutations, product
import numpy as np
import torch
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCPU,
dtypesIfCUDA,
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
)
from torch.testing._internal.common_dtype import (
all_types,
all_types_and,
floating_types_and,
integral_types,
)
from torch.testing._internal.common_utils import (
run_tests,
skipIfTorchDynamo,
slowTest,
TestCase,
)
class TestSortAndSelect(TestCase):
from itertools import chain, combinations
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
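The check_order predicates above rely on `a != a` being true exactly for NaN. A standalone sketch of the descending-order variant:

import torch
from torch import nan

def is_descending(values):
    # torch.sort(descending=True) places NaNs first, so a neighboring pair is
    # in order when the left element is NaN (a != a) or left >= right.
    a, b = values[:-1], values[1:]
    return bool(((a != a) | (a >= b)).all())

v, _ = torch.sort(torch.tensor([1.0, nan, 3.0]), descending=True)
assert is_descending(v)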
torch
|
test/test_serialization.py
|
test_tensor_subclass_getstate_overwrite
|
def test_tensor_subclass_getstate_overwrite(self):
wrapped_tensor = torch.rand(2)
my_tensor = TestGetStateSubclass(wrapped_tensor)
foo_val = "bar"
my_tensor.foo = foo_val
self.assertEqual(my_tensor.foo, foo_val)
with BytesIOContext() as f:
torch.save(my_tensor, f)
f.seek(0)
new_tensor = torch.load(f)
self.assertIsInstance(new_tensor, TestGetStateSubclass)
self.assertEqual(new_tensor.elem, my_tensor.elem)
self.assertEqual(new_tensor.foo, foo_val)
self.assertTrue(new_tensor.reloaded)
|
def test_tensor_subclass_getstate_overwrite(self):
wrapped_tensor = torch.rand(2)
my_tensor = TestGetStateSubclass(wrapped_tensor)
foo_val = "bar"
my_tensor.foo = foo_val
self.assertEqual(my_tensor.foo, foo_val)
with BytesIOContext() as f:
torch.save(my_tensor, f)
f.seek(0)
with safe_globals([TestGetStateSubclass]):
new_tensor = torch.load(f)
self.assertIsInstance(new_tensor, TestGetStateSubclass)
self.assertEqual(new_tensor.elem, my_tensor.elem)
self.assertEqual(new_tensor.foo, foo_val)
self.assertTrue(new_tensor.reloaded)
|
import torch
import unittest
import io
import tempfile
import os
import gc
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from copy import deepcopy
from itertools import product
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, TEST_DILL, \
run_tests, download_file, BytesIOContext, TemporaryFileName, parametrize, instantiate_parametrized_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
import dill
can_retrieve_source = True
import importlib.util
class TestSubclassSerialization(TestCase):
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSubclassSerialization(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
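The updated load above is wrapped in safe_globals so that weights_only unpickling accepts the subclass. A minimal sketch of the context-manager form, using the TwoTensor subclass these tests already import:

import io
import torch
from torch.serialization import safe_globals
from torch.testing._internal.two_tensor import TwoTensor

buf = io.BytesIO()
torch.save(TwoTensor(torch.rand(2), torch.rand(2)), buf)
buf.seek(0)
# The subclass is allowlisted only for the duration of this load.
with safe_globals([TwoTensor]):
    t = torch.load(buf, weights_only=True)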
torch
|
test/test_serialization.py
|
test_safe_globals_for_weights_only
|
def test_safe_globals_for_weights_only(self):
'''
        Tests import semantics for tensor subclasses and the {add/get/clear}_safe_globals APIs
'''
t = TwoTensor(torch.randn(2, 3), torch.randn(2, 3))
p = torch.nn.Parameter(t)
sd = OrderedDict([('t', t), ('p', p)])
with tempfile.NamedTemporaryFile() as f:
torch.save(sd, f)
# Loading tensor subclass with weights_only=True should fail
# since tensor subclass is not in safe_globals
with self.assertRaisesRegex(pickle.UnpicklingError,
"Unsupported global: GLOBAL torch.testing._internal.two_tensor.TwoTensor"):
f.seek(0)
sd = torch.load(f, weights_only=True)
# Loading tensor subclass should work if the class is marked safe
f.seek(0)
try:
torch.serialization.add_safe_globals([TwoTensor])
self.assertTrue(torch.serialization.get_safe_globals() == [TwoTensor])
sd = torch.load(f, weights_only=True)
self.assertEqual(sd['t'], t)
self.assertEqual(sd['p'], p)
# Should fail again when safe globals are cleared
torch.serialization.clear_safe_globals()
f.seek(0)
with self.assertRaisesRegex(pickle.UnpicklingError,
"Unsupported global: GLOBAL torch.testing._internal.two_tensor.TwoTensor"):
torch.load(f, weights_only=True)
finally:
torch.serialization.clear_safe_globals()
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSubclassSerialization(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
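Beyond the context manager, the test drives the imperative allowlist API; a minimal sketch (the class name is illustrative):

import torch

class MyType:
    pass

torch.serialization.add_safe_globals([MyType])
assert MyType in torch.serialization.get_safe_globals()
# The allowlist is process-global state, so tests clear it when finished.
torch.serialization.clear_safe_globals()
assert torch.serialization.get_safe_globals() == []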
torch
|
test/test_serialization.py
|
test_tensor_subclass_map_location
|
instantiate_device_type_tests(TestBothSerialization, globals())
instantiate_parametrized_tests(TestSubclassSerialization)
instantiate_parametrized_tests(TestOldSerialization)
instantiate_parametrized_tests(TestSerialization)
if __name__ == '__main__':
run_tests()
|
def test_tensor_subclass_map_location(self):
t = TwoTensor(torch.randn(2, 3), torch.randn(2, 3))
sd = {'t': t}
with TemporaryFileName() as f:
torch.save(sd, f)
with safe_globals([TwoTensor]):
sd_loaded = torch.load(f, map_location=torch.device('cuda:0'))
self.assertTrue(sd_loaded['t'].device == torch.device('cuda:0'))
self.assertTrue(sd_loaded['t'].a.device == torch.device('cuda:0'))
self.assertTrue(sd_loaded['t'].b.device == torch.device('cuda:0'))
            # make sure map_location does not carry over to subsequent torch.load calls
sd_loaded = torch.load(f)
self.assertTrue(sd_loaded['t'].device == torch.device('cpu'))
self.assertTrue(sd_loaded['t'].a.device == torch.device('cpu'))
self.assertTrue(sd_loaded['t'].b.device == torch.device('cpu'))
|
import contextlib
import copy
import gc
import gzip
import io
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
import warnings
import zipfile
from collections import namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from pathlib import Path
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensorConverter
from torch._utils import _rebuild_tensor
from torch._utils_internal import get_file_path_2
from torch.serialization import (
check_module_version_greater_or_equal,
get_default_load_endianness,
LoadEndianness,
safe_globals,
set_default_load_endianness,
skip_data,
SourceChangeWarning,
)
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import (
AlwaysWarnTypedStorageRemoval,
BytesIOContext,
download_file,
instantiate_parametrized_tests,
IS_FBCODE,
IS_FILESYSTEM_UTF8_ENCODING,
IS_WINDOWS,
parametrize,
run_tests,
serialTest,
skipIfTorchDynamo,
TemporaryDirectoryName,
TemporaryFileName,
TEST_DILL,
TestCase,
)
from torch.testing._internal.two_tensor import TwoTensor # noqa: F401
from torch.utils._import_utils import import_dill
from mmap import MAP_PRIVATE, MAP_SHARED
dill = import_dill()
HAS_DILL_AT_LEAST_0_3_1 = dill is not None and check_module_version_greater_or_equal(dill, (0, 3, 1))
can_retrieve_source = True
Point = namedtuple('Point', ['x', 'y'])
import importlib.util
class TestSubclassSerialization(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
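map_location remaps every storage in a checkpoint at load time and, as the test above verifies, applies only to that torch.load call. A CPU-only sketch (the test itself targets 'cuda:0'):

import io
import torch

buf = io.BytesIO()
torch.save({"t": torch.randn(2, 3)}, buf)
buf.seek(0)
# All storages reachable from the checkpoint land on the requested device.
sd = torch.load(buf, map_location=torch.device("cpu"))
assert sd["t"].device == torch.device("cpu")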
torch
|
test/test_shape_ops.py
|
_generate_input
|
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float('nan')
x[torch.randn(*shape) > 0.5] = float('inf')
x[torch.randn(*shape) > 0.5] = float('-inf')
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex('nan')
x[torch.randn(*shape) > 0.5] = complex('inf')
x[torch.randn(*shape) > 0.5] = complex('-inf')
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
class TestShapeOps(TestCase):
# TODO: update to work on CUDA, too
@onlyCPU
def test_unbind(self, device):
x = torch.rand(2, 3, 4, 5)
for dim in range(4):
res = torch.unbind(x, dim)
res2 = x.unbind(dim)
self.assertEqual(x.size(dim), len(res))
self.assertEqual(x.size(dim), len(res2))
for i in range(dim):
self.assertEqual(x.select(dim, i), res[i])
self.assertEqual(x.select(dim, i), res2[i])
# TODO: update to work on CUDA, too?
@skipIfTorchDynamo("TorchDynamo fails with an unknown error")
@onlyCPU
def test_tolist(self, device):
list0D = []
tensor0D = torch.tensor(list0D)
self.assertEqual(tensor0D.tolist(), list0D)
table1D = [1., 2., 3.]
tensor1D = torch.tensor(table1D)
storage = torch.Storage(table1D)
self.assertEqual(tensor1D.tolist(), table1D)
self.assertEqual(storage.tolist(), table1D)
self.assertEqual(tensor1D.tolist(), table1D)
self.assertEqual(storage.tolist(), table1D)
table2D = [[1, 2], [3, 4]]
tensor2D = torch.tensor(table2D)
self.assertEqual(tensor2D.tolist(), table2D)
tensor3D = torch.tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
tensorNonContig = tensor3D.select(1, 1)
self.assertFalse(tensorNonContig.is_contiguous())
self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])
@dtypes(torch.int64, torch.float, torch.complex128)
def test_movedim_invalid(self, device, dtype):
shape = self._rand_shape(4, min_size=5, max_size=10)
x = _generate_input(shape, dtype, device, False)
for fn in [torch.movedim, torch.moveaxis]:
# Invalid `source` and `destination` dimension
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
fn(x, 5, 0)
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
fn(x, 0, 5)
# Mismatch in size of `source` and `destination`
with self.assertRaisesRegex(RuntimeError, "movedim: Invalid source or destination dims:"):
fn(x, (1, 0), (0, ))
with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `source`"):
fn(x, (0, 0), (0, 1))
with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `source`"):
fn(x, (0, 1, 0), (0, 1, 2))
with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `destination`"):
fn(x, (0, 1), (1, 1))
with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `destination`"):
fn(x, (0, 1, 2), (1, 0, 1))
@dtypes(torch.int64, torch.float, torch.complex128)
def test_movedim(self, device, dtype):
for fn in [torch.moveaxis, torch.movedim]:
for nd in range(5):
shape = self._rand_shape(nd, min_size=5, max_size=10)
x = _generate_input(shape, dtype, device, with_extremal=False)
for random_negative in [True, False]:
for src_dim, dst_dim in permutations(range(nd), r=2):
random_prob = random.random()
if random_negative and random_prob > 0.66:
src_dim = src_dim - nd
elif random_negative and random_prob > 0.33:
dst_dim = dst_dim - nd
elif random_negative:
src_dim = src_dim - nd
dst_dim = dst_dim - nd
# Integer `source` and `destination`
torch_fn = partial(fn, source=src_dim, destination=dst_dim)
np_fn = partial(np.moveaxis, source=src_dim, destination=dst_dim)
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
if nd == 0:
continue
                    def make_index_negative(sequence, idx):
                        # Return a copy of `sequence` with entry `idx` rewritten
                        # as its negative-dim equivalent.
                        sequence = list(sequence)
                        sequence[idx] = sequence[idx] - nd
                        return tuple(sequence)
for src_sequence in permutations(range(nd), r=random.randint(1, nd)):
# Sequence `source` and `destination`
dst_sequence = tuple(random.sample(range(nd), len(src_sequence)))
# Randomly change a dim to a negative dim representation of itself.
random_prob = random.random()
if random_negative and random_prob > 0.66:
random_idx = random.randint(0, len(src_sequence) - 1)
src_sequence = make_index_negative(src_sequence, random_idx)
elif random_negative and random_prob > 0.33:
random_idx = random.randint(0, len(src_sequence) - 1)
dst_sequence = make_index_negative(dst_sequence, random_idx)
elif random_negative:
random_idx = random.randint(0, len(src_sequence) - 1)
dst_sequence = make_index_negative(dst_sequence, random_idx)
random_idx = random.randint(0, len(src_sequence) - 1)
src_sequence = make_index_negative(src_sequence, random_idx)
torch_fn = partial(fn, source=src_sequence, destination=dst_sequence)
np_fn = partial(np.moveaxis, source=src_sequence, destination=dst_sequence)
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
# Move dim to same position
x = torch.randn(2, 3, 5, 7, 11)
torch_fn = partial(fn, source=(0, 1), destination=(0, 1))
np_fn = partial(np.moveaxis, source=(0, 1), destination=(0, 1))
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
torch_fn = partial(fn, source=1, destination=1)
np_fn = partial(np.moveaxis, source=1, destination=1)
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
# Empty Sequence
torch_fn = partial(fn, source=(), destination=())
np_fn = partial(np.moveaxis, source=(), destination=())
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
@dtypes(torch.float, torch.bool)
def test_diag(self, device, dtype):
if dtype is torch.bool:
x = torch.rand(100, 100, device=device) >= 0.5
else:
x = torch.rand(100, 100, dtype=dtype, device=device)
res1 = torch.diag(x)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.diag(x, out=res2)
self.assertEqual(res1, res2)
def test_diagonal(self, device):
x = torch.randn((100, 100), device=device)
result = torch.diagonal(x)
expected = torch.diag(x)
self.assertEqual(result, expected)
x = torch.randn((100, 100), device=device)
result = torch.diagonal(x, 17)
expected = torch.diag(x, 17)
self.assertEqual(result, expected)
@onlyCPU
@dtypes(torch.float)
def test_diagonal_multidim(self, device, dtype):
x = torch.randn(10, 11, 12, 13, dtype=dtype, device=device)
xn = x.numpy()
for args in [(2, 2, 3),
(2,),
(-2, 1, 2),
(0, -2, -1)]:
result = torch.diagonal(x, *args)
expected = xn.diagonal(*args)
self.assertEqual(expected.shape, result.shape)
self.assertEqual(expected, result)
        # test non-contiguous
xp = x.permute(1, 2, 3, 0)
result = torch.diagonal(xp, 0, -2, -1)
expected = xp.numpy().diagonal(0, -2, -1)
self.assertEqual(expected.shape, result.shape)
self.assertEqual(expected, result)
@onlyNativeDeviceTypes
@dtypes(*all_types())
@dtypesIfCUDA(*all_types_and(torch.half))
def test_trace(self, device, dtype):
def test(shape):
tensor = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9)
expected_dtype = tensor.sum().dtype
expected_dtype = torch_to_numpy_dtype_dict[expected_dtype]
result = np.trace(tensor.cpu().numpy(), dtype=expected_dtype)
expected = torch.tensor(result, device=device)
self.assertEqual(tensor.trace(), expected)
shapes = (
[10, 1],
[1, 10],
[100, 100],
[20, 100],
[100, 20],
)
for shape in shapes:
test(shape)
def generate_clamp_baseline(self, device, dtype, *, min_vals, max_vals, with_nans):
"""
Creates a random tensor for a given device and dtype, and computes the expected clamped
values given the min_vals and/or max_vals.
If with_nans is provided, then some values are randomly set to nan.
"""
X = torch.rand(100, device=device).mul(50).add(-25) # uniform in [-25, 25]
X = X.to(dtype)
if with_nans:
mask = torch.randint(0, 2, X.shape, dtype=torch.bool, device=device)
X[mask] = nan
if isinstance(min_vals, torch.Tensor):
min_vals = min_vals.cpu().numpy()
if isinstance(max_vals, torch.Tensor):
max_vals = max_vals.cpu().numpy()
# Use NumPy implementation as reference
X_clamped = torch.tensor(np.clip(X.cpu().numpy(), a_min=min_vals, a_max=max_vals), device=device)
return X, X_clamped
# Tests clamp and its alias, clip
@dtypes(torch.int64, torch.float32)
def test_clamp(self, device, dtype):
op_list = (torch.clamp, torch.Tensor.clamp, torch.Tensor.clamp_,
torch.clip, torch.Tensor.clip, torch.Tensor.clip_)
# min/max argument product
args = product((-10, None), (10, None))
for op in op_list:
for min_val, max_val in args:
if min_val is None and max_val is None:
continue
X, Y_expected = self.generate_clamp_baseline(device, dtype,
min_vals=min_val,
max_vals=max_val,
with_nans=False)
# Test op
X1 = X.clone() # So that the in-place ops do not change X
Y_actual = op(X1, min_val, max_val)
self.assertEqual(Y_expected, Y_actual)
# Test op-out behavior (out does not exist for method versions)
if op in (torch.clamp, torch.clip):
Y_out = torch.empty_like(X)
op(X, min=min_val, max=max_val, out=Y_out)
self.assertEqual(Y_expected, Y_out)
def test_clamp_propagates_nans(self, device):
op_list = (torch.clamp, torch.Tensor.clamp, torch.Tensor.clamp_,
torch.clip, torch.Tensor.clip, torch.Tensor.clip_)
# min/max argument product
args = product((-10, None), (10, None))
for op in op_list:
for min_val, max_val in args:
if min_val is None and max_val is None:
continue
X, Y_expected = self.generate_clamp_baseline(device, torch.float,
min_vals=min_val,
max_vals=max_val,
with_nans=True)
Y_expected = torch.isnan(Y_expected)
# Test op
X1 = X.clone() # So that the in-place ops do not change X
Y_actual = op(X1, min_val, max_val)
self.assertEqual(Y_expected, torch.isnan(Y_actual))
# Test op-out behavior (out does not exist for method versions)
if op in (torch.clamp, torch.clip):
Y_out = torch.empty_like(X)
op(X, min_val, max_val, out=Y_out)
self.assertEqual(Y_expected, torch.isnan(Y_out))
def test_clamp_raises_arg_errors(self, device):
X = torch.randn(100, dtype=torch.float, device=device)
error_msg = 'At least one of \'min\' or \'max\' must not be None'
with self.assertRaisesRegex(RuntimeError, error_msg):
X.clamp()
with self.assertRaisesRegex(RuntimeError, error_msg):
X.clamp_()
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.clamp(X)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip(self, device, dtype):
make_from_data = partial(torch.tensor, device=device, dtype=dtype)
make_from_size = partial(make_tensor, device=device, dtype=dtype)
def test_flip_impl(input_t, dims, output_t):
def all_t():
yield input_t, output_t
if dtype is torch.float:
# We generate quantized versions as well
for qdtype in (torch.quint8, torch.qint8, torch.qint32):
qinput_t = torch.quantize_per_tensor(input_t, 0.1, 5, qdtype)
qoutput_t = torch.quantize_per_tensor(output_t, 0.1, 5, qdtype)
yield qinput_t, qoutput_t
for in_t, out_t in all_t():
self.assertEqual(in_t.flip(dims), out_t)
n = in_t.ndim
if not isinstance(dims, tuple):
# Wrap dim
self.assertEqual(in_t.flip(-n + dims), out_t)
else:
# Permute dimensions
for p_dims in permutations(dims):
self.assertEqual(in_t.flip(p_dims), out_t)
if len(p_dims) > 0:
# Wrap 1st dim
self.assertEqual(in_t.flip((-n + p_dims[0],) + p_dims[1:]), out_t)
def gen_data():
# Basic tests
data = make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2)
nonctg = make_from_size((2, 2, 2), noncontiguous=True).copy_(data)
dims_result = ((0, make_from_data([5, 6, 7, 8, 1, 2, 3, 4]).view(2, 2, 2)),
(1, make_from_data([3, 4, 1, 2, 7, 8, 5, 6]).view(2, 2, 2)),
(2, make_from_data([2, 1, 4, 3, 6, 5, 8, 7]).view(2, 2, 2)),
((0, 1), make_from_data([7, 8, 5, 6, 3, 4, 1, 2]).view(2, 2, 2)),
((0, 1, 2), make_from_data([8, 7, 6, 5, 4, 3, 2, 1]).view(2, 2, 2)))
for in_tensor, (dims, out_tensor) in product((data, nonctg), dims_result):
yield in_tensor, dims, out_tensor
# Expanded
in_t = make_from_data([1, 2, 3]).view(3, 1).expand(3, 2)
dims = 0
out_t = make_from_data([3, 3, 2, 2, 1, 1]).view(3, 2)
yield in_t, dims, out_t
# Noop on expanded dimension
yield in_t, 1, in_t
# Transposed
in_t = make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2).transpose(0, 1)
dims = (0, 1, 2)
out_t = make_from_data([8, 7, 4, 3, 6, 5, 2, 1]).view(2, 2, 2)
yield in_t, dims, out_t
# Rectangular case
in_t = make_from_data([1, 2, 3, 4, 5, 6]).view(2, 3)
dims = 0
out_t = make_from_data([[4, 5, 6], [1, 2, 3]])
yield in_t, dims, out_t
dims = 1
out_t = make_from_data([[3, 2, 1], [6, 5, 4]])
yield in_t, dims, out_t
# vectorized NCHW cases (images)
if device == "cpu" and dtype != torch.bfloat16:
for mf in [torch.contiguous_format, torch.channels_last]:
for c in [2, 3, 8, 16]:
in_t = make_from_size((2, c, 32, 32)).contiguous(memory_format=mf)
np_in_t = in_t.numpy()
np_out_t = np_in_t[:, :, :, ::-1].copy()
out_t = torch.from_numpy(np_out_t)
yield in_t, 3, out_t
np_out_t = np_in_t[:, :, ::-1, :].copy()
out_t = torch.from_numpy(np_out_t)
yield in_t, 2, out_t
# non-contig cases
in_tt = in_t[..., ::2, :]
np_in_t = in_tt.numpy()
np_out_t = np_in_t[:, :, :, ::-1].copy()
out_t = torch.from_numpy(np_out_t)
yield in_tt, 3, out_t
in_tt = in_t[..., ::2]
np_in_t = in_tt.numpy()
np_out_t = np_in_t[:, :, :, ::-1].copy()
out_t = torch.from_numpy(np_out_t)
yield in_tt, 3, out_t
# Noops (edge cases)
# Size 0
in_t = make_from_data(())
yield in_t, 0, in_t
yield in_t, (), in_t
# dims = ()
in_t = make_from_size((3, 2, 1))
yield in_t, (), in_t
# Zero elements, non-zero size
in_t = make_from_size((3, 0, 2))
for i in range(in_t.ndim):
yield in_t, i, in_t
# Size 1
in_t = make_from_size(())
yield in_t, 0, in_t
in_t = make_from_size((1,))
yield in_t, 0, in_t
for in_tensor, dims, out_tensor in gen_data():
test_flip_impl(in_tensor, dims, out_tensor)
# test for shape
size = [2, 3, 4]
data = make_from_size(size)
possible_dims = range(len(size))
test_dims = chain(combinations(possible_dims, 1), combinations(possible_dims, 2))
for dims in test_dims:
self.assertEqual(size, list(data.flip(dims).size()))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip_errors(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
data = make_arg((2, 2, 2))
# not allow flip on the same dim more than once
self.assertRaises(RuntimeError, lambda: data.flip(0, 1, 1))
# not allow empty list as input
self.assertRaises(TypeError, lambda: data.flip())
# not allow dim > max dim
self.assertRaises(IndexError, lambda: data.flip(0, 1, 2, 3))
self.assertRaises(IndexError, lambda: data.flip(3))
def _rand_shape(self, dim, min_size, max_size):
return tuple(torch.randint(min_size, max_size + 1, (dim,)))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip_numpy(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
for ndim in [3, 4]:
shape = self._rand_shape(ndim, 5, 10)
data = make_arg(shape)
# Axis to sample for given shape.
for i in range(1, ndim + 1):
# Check all combinations of `i` axis.
for flip_dim in combinations(range(ndim), i):
torch_fn = partial(torch.flip, dims=flip_dim)
np_fn = partial(np.flip, axis=flip_dim)
self.compare_with_numpy(torch_fn, np_fn, data)
@onlyCUDA # CPU is too slow
@largeTensorTest('17GB') # 4 tensors of 4GB (in, out) x (torch, numpy) + 1GB
@largeTensorTest("81GB", "cpu") # even for CUDA test, sufficient system memory is required
def test_flip_large_tensor(self, device):
t_in = torch.empty(2**32 + 1, dtype=torch.uint8).random_()
torch_fn = partial(torch.flip, dims=(0,))
np_fn = partial(np.flip, axis=0)
self.compare_with_numpy(torch_fn, np_fn, t_in)
del t_in
def _test_fliplr_flipud(self, torch_fn, np_fn, min_dim, max_dim, device, dtype):
for dim in range(min_dim, max_dim + 1):
shape = self._rand_shape(dim, 5, 10)
# Randomly scale the input
if dtype.is_floating_point or dtype.is_complex:
data = torch.randn(*shape, device=device, dtype=dtype)
else:
data = torch.randint(0, 10, shape, device=device, dtype=dtype)
self.compare_with_numpy(torch_fn, np_fn, data)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_fliplr(self, device, dtype):
self._test_fliplr_flipud(torch.fliplr, np.fliplr, 2, 4, device, dtype)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_fliplr_invalid(self, device, dtype):
x = torch.randn(42).to(dtype)
with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
torch.fliplr(x)
with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
torch.fliplr(torch.tensor(42, device=device, dtype=dtype))
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_flipud(self, device, dtype):
self._test_fliplr_flipud(torch.flipud, np.flipud, 1, 4, device, dtype)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_flipud_invalid(self, device, dtype):
with self.assertRaisesRegex(RuntimeError, "Input must be >= 1-d."):
torch.flipud(torch.tensor(42, device=device, dtype=dtype))
def test_rot90(self, device):
data = torch.arange(1, 5, device=device).view(2, 2)
self.assertEqual(torch.tensor([1, 2, 3, 4]).view(2, 2), data.rot90(0, [0, 1]))
self.assertEqual(torch.tensor([2, 4, 1, 3]).view(2, 2), data.rot90(1, [0, 1]))
self.assertEqual(torch.tensor([4, 3, 2, 1]).view(2, 2), data.rot90(2, [0, 1]))
self.assertEqual(torch.tensor([3, 1, 4, 2]).view(2, 2), data.rot90(3, [0, 1]))
# test for default args k=1, dims=[0, 1]
self.assertEqual(data.rot90(), data.rot90(1, [0, 1]))
# test for reversed order of dims
self.assertEqual(data.rot90(3, [0, 1]), data.rot90(1, [1, 0]))
# test for modulo of k
self.assertEqual(data.rot90(5, [0, 1]), data.rot90(1, [0, 1]))
self.assertEqual(data.rot90(3, [0, 1]), data.rot90(-1, [0, 1]))
self.assertEqual(data.rot90(-5, [0, 1]), data.rot90(-1, [0, 1]))
# test for dims out-of-range error
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, -3]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 2]))
# test tensor with more than 2D
data = torch.arange(1, 9, device=device).view(2, 2, 2)
self.assertEqual(torch.tensor([2, 4, 1, 3, 6, 8, 5, 7]).view(2, 2, 2), data.rot90(1, [1, 2]))
self.assertEqual(data.rot90(1, [1, -1]), data.rot90(1, [1, 2]))
# test for errors
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 3]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [1, 1]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 1, 2]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0]))
@skipIfTorchDynamo("TorchDynamo fails with an unknown error")
@dtypes(torch.cfloat, torch.cdouble)
def test_complex_rot90(self, device, dtype):
shape = self._rand_shape(random.randint(2, 4), 5, 10)
for rot_times in range(4):
data = torch.randn(*shape, device=device, dtype=dtype)
torch_fn = partial(torch.rot90, k=rot_times, dims=[0, 1])
np_fn = partial(np.rot90, k=rot_times, axes=[0, 1])
self.compare_with_numpy(torch_fn, np_fn, data)
# TODO: update once warning flag is available to always trigger ONCE warnings
# Ensures nonzero does not throw a warning, even when the as_tuple argument
# is not provided
def test_nonzero_no_warning(self, device):
t = torch.randn((2, 2), device=device)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
torch.nonzero(t)
t.nonzero()
self.assertEqual(len(w), 0)
@dtypes(*all_types_and(torch.half, torch.bool, torch.bfloat16))
def test_nonzero(self, device, dtype):
shapes = [
torch.Size((12,)),
torch.Size((12, 1)),
torch.Size((1, 12)),
torch.Size((6, 2)),
torch.Size((3, 2, 2)),
torch.Size((5, 5, 5)),
]
def gen_nontrivial_input(shape, dtype, device):
if dtype != torch.bfloat16:
return torch.randint(2, shape, device=device, dtype=dtype)
else:
            # torch.randint does not support bfloat16 on Windows
return torch.randint(2, shape, device=device, dtype=torch.float).to(dtype)
for shape in shapes:
tensor = gen_nontrivial_input(shape, dtype, device)
dst1 = torch.nonzero(tensor, as_tuple=False)
dst2 = tensor.nonzero(as_tuple=False)
dst3 = torch.empty([], dtype=torch.long, device=device)
torch.nonzero(tensor, out=dst3)
if self.device_type != 'xla':
# xla does not raise runtime error
self.assertRaisesRegex(
RuntimeError,
"scalar type Long",
lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.float, device=device))
)
if self.device_type == 'cuda':
self.assertRaisesRegex(
RuntimeError,
"on the same device",
lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.long))
)
np_array = tensor.cpu().numpy() if dtype != torch.bfloat16 else tensor.float().cpu().numpy()
np_result = torch.from_numpy(np.stack(np_array.nonzero())).t()
self.assertEqual(dst1.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst2.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst3.cpu(), np_result, atol=0, rtol=0)
tup1 = torch.nonzero(tensor, as_tuple=True)
tup2 = tensor.nonzero(as_tuple=True)
tup1 = torch.stack(tup1).t().cpu()
tup2 = torch.stack(tup2).t().cpu()
self.assertEqual(tup1, np_result, atol=0, rtol=0)
self.assertEqual(tup2, np_result, atol=0, rtol=0)
def test_nonzero_astuple_out(self, device):
t = torch.randn((3, 3, 3), device=device)
out = torch.empty_like(t, dtype=torch.long)
with self.assertRaises(RuntimeError):
torch.nonzero(t, as_tuple=True, out=out)
self.assertEqual(torch.nonzero(t, as_tuple=False, out=out), torch.nonzero(t, out=out))
# Verifies that JIT script cannot handle the as_tuple kwarg
# See Issue https://github.com/pytorch/pytorch/issues/45499.
def _foo(t):
tuple_result = torch.nonzero(t, as_tuple=True)
nontuple_result = torch.nonzero(t, as_tuple=False)
out = torch.empty_like(nontuple_result)
torch.nonzero(t, as_tuple=False, out=out)
return tuple_result, nontuple_result, out
with self.assertRaises(RuntimeError):
scripted_foo = torch.jit.script(_foo)
# Verifies that JIT tracing works fine
traced_foo = torch.jit.trace(_foo, t)
traced_tuple, traced_nontuple, traced_out = traced_foo(t)
expected_tuple = torch.nonzero(t, as_tuple=True)
expected_nontuple = torch.nonzero(t)
self.assertEqual(traced_tuple, expected_tuple)
self.assertEqual(traced_nontuple, expected_nontuple)
self.assertEqual(traced_out, expected_nontuple)
@onlyNativeDeviceTypes
def test_nonzero_discontiguous(self, device):
shape = (4, 4)
tensor = torch.randint(2, shape, device=device)
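        # build a non-contiguous copy by viewing every other column of a double-width buffer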
tensor_nc = torch.empty(shape[0], shape[1] * 2, device=device)[:, ::2].copy_(tensor)
dst1 = tensor.nonzero(as_tuple=False)
dst2 = tensor_nc.nonzero(as_tuple=False)
self.assertEqual(dst1, dst2, atol=0, rtol=0)
dst3 = torch.empty_like(dst1)
data_ptr = dst3.data_ptr()
# expect dst3 storage to be reused
torch.nonzero(tensor, out=dst3)
self.assertEqual(data_ptr, dst3.data_ptr())
self.assertEqual(dst1, dst3, atol=0, rtol=0)
# discontiguous out
dst4 = torch.empty(dst1.size(0), dst1.size(1) * 2, dtype=torch.long, device=device)[:, ::2]
data_ptr = dst4.data_ptr()
strides = dst4.stride()
torch.nonzero(tensor, out=dst4)
self.assertEqual(data_ptr, dst4.data_ptr())
self.assertEqual(dst1, dst4, atol=0, rtol=0)
self.assertEqual(strides, dst4.stride())
def test_nonzero_non_diff(self, device):
x = torch.randn(10, requires_grad=True)
nz = x.nonzero()
self.assertFalse(nz.requires_grad)
@dtypes(torch.int64, torch.float, torch.complex128)
def test_sparse_dense_dim(self, device, dtype):
for shape in [(), (2, ), (2, 3)]:
if dtype.is_complex or dtype.is_floating_point:
x = torch.rand(shape, device=device, dtype=dtype)
else:
x = torch.randint(-9, 9, shape, device=device, dtype=dtype)
self.assertEqual(x.sparse_dim(), 0)
self.assertEqual(x.dense_dim(), len(shape))
instantiate_device_type_tests(TestShapeOps, globals())
if __name__ == '__main__':
run_tests()
|
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(
30, 100
)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float("nan")
x[torch.randn(*shape) > 0.5] = float("inf")
x[torch.randn(*shape) > 0.5] = float("-inf")
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex("nan")
x[torch.randn(*shape) > 0.5] = complex("inf")
x[torch.randn(*shape) > 0.5] = complex("-inf")
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
class TestShapeOps(TestCase):
# TODO: update to work on CUDA, too
@onlyCPU
def test_unbind(self, device):
x = torch.rand(2, 3, 4, 5)
for dim in range(4):
res = torch.unbind(x, dim)
res2 = x.unbind(dim)
self.assertEqual(x.size(dim), len(res))
self.assertEqual(x.size(dim), len(res2))
for i in range(dim):
self.assertEqual(x.select(dim, i), res[i])
self.assertEqual(x.select(dim, i), res2[i])
# TODO: update to work on CUDA, too?
@skipIfTorchDynamo("TorchDynamo fails with an unknown error")
@onlyCPU
def test_tolist(self, device):
list0D = []
tensor0D = torch.tensor(list0D)
self.assertEqual(tensor0D.tolist(), list0D)
table1D = [1.0, 2.0, 3.0]
tensor1D = torch.tensor(table1D)
storage = torch.Storage(table1D)
self.assertEqual(tensor1D.tolist(), table1D)
self.assertEqual(storage.tolist(), table1D)
self.assertEqual(tensor1D.tolist(), table1D)
self.assertEqual(storage.tolist(), table1D)
table2D = [[1, 2], [3, 4]]
tensor2D = torch.tensor(table2D)
self.assertEqual(tensor2D.tolist(), table2D)
tensor3D = torch.tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
tensorNonContig = tensor3D.select(1, 1)
self.assertFalse(tensorNonContig.is_contiguous())
self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])
@dtypes(torch.int64, torch.float, torch.complex128)
def test_movedim_invalid(self, device, dtype):
shape = self._rand_shape(4, min_size=5, max_size=10)
x = _generate_input(shape, dtype, device, False)
for fn in [torch.movedim, torch.moveaxis]:
# Invalid `source` and `destination` dimension
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
fn(x, 5, 0)
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
fn(x, 0, 5)
# Mismatch in size of `source` and `destination`
with self.assertRaisesRegex(
RuntimeError, "movedim: Invalid source or destination dims:"
):
fn(x, (1, 0), (0,))
with self.assertRaisesRegex(
RuntimeError, "movedim: repeated dim in `source`"
):
fn(x, (0, 0), (0, 1))
with self.assertRaisesRegex(
RuntimeError, "movedim: repeated dim in `source`"
):
fn(x, (0, 1, 0), (0, 1, 2))
with self.assertRaisesRegex(
RuntimeError, "movedim: repeated dim in `destination`"
):
fn(x, (0, 1), (1, 1))
with self.assertRaisesRegex(
RuntimeError, "movedim: repeated dim in `destination`"
):
fn(x, (0, 1, 2), (1, 0, 1))
@dtypes(torch.int64, torch.float, torch.complex128)
def test_movedim(self, device, dtype):
for fn in [torch.moveaxis, torch.movedim]:
for nd in range(5):
shape = self._rand_shape(nd, min_size=5, max_size=10)
x = _generate_input(shape, dtype, device, with_extremal=False)
for random_negative in [True, False]:
for src_dim, dst_dim in permutations(range(nd), r=2):
random_prob = random.random()
if random_negative and random_prob > 0.66:
src_dim = src_dim - nd
elif random_negative and random_prob > 0.33:
dst_dim = dst_dim - nd
elif random_negative:
src_dim = src_dim - nd
dst_dim = dst_dim - nd
# Integer `source` and `destination`
torch_fn = partial(fn, source=src_dim, destination=dst_dim)
np_fn = partial(
np.moveaxis, source=src_dim, destination=dst_dim
)
self.compare_with_numpy(
torch_fn, np_fn, x, device=None, dtype=None
)
if nd == 0:
continue
                    def make_index_negative(sequence, idx):
                        # replace the idx-th entry with its negative-dim equivalent
                        sequence = list(sequence)
                        sequence[idx] = sequence[idx] - nd
                        return tuple(sequence)
for src_sequence in permutations(
range(nd), r=random.randint(1, nd)
):
# Sequence `source` and `destination`
dst_sequence = tuple(
random.sample(range(nd), len(src_sequence))
)
# Randomly change a dim to a negative dim representation of itself.
random_prob = random.random()
if random_negative and random_prob > 0.66:
random_idx = random.randint(0, len(src_sequence) - 1)
src_sequence = make_index_negative(src_sequence, random_idx)
elif random_negative and random_prob > 0.33:
random_idx = random.randint(0, len(src_sequence) - 1)
dst_sequence = make_index_negative(dst_sequence, random_idx)
elif random_negative:
random_idx = random.randint(0, len(src_sequence) - 1)
dst_sequence = make_index_negative(dst_sequence, random_idx)
random_idx = random.randint(0, len(src_sequence) - 1)
src_sequence = make_index_negative(src_sequence, random_idx)
torch_fn = partial(
fn, source=src_sequence, destination=dst_sequence
)
np_fn = partial(
np.moveaxis, source=src_sequence, destination=dst_sequence
)
self.compare_with_numpy(
torch_fn, np_fn, x, device=None, dtype=None
)
# Move dim to same position
x = torch.randn(2, 3, 5, 7, 11)
torch_fn = partial(fn, source=(0, 1), destination=(0, 1))
np_fn = partial(np.moveaxis, source=(0, 1), destination=(0, 1))
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
torch_fn = partial(fn, source=1, destination=1)
np_fn = partial(np.moveaxis, source=1, destination=1)
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
# Empty Sequence
torch_fn = partial(fn, source=(), destination=())
np_fn = partial(np.moveaxis, source=(), destination=())
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
@dtypes(torch.float, torch.bool)
def test_diag(self, device, dtype):
if dtype is torch.bool:
x = torch.rand(100, 100, device=device) >= 0.5
else:
x = torch.rand(100, 100, dtype=dtype, device=device)
res1 = torch.diag(x)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.diag(x, out=res2)
self.assertEqual(res1, res2)
def test_diagonal(self, device):
x = torch.randn((100, 100), device=device)
result = torch.diagonal(x)
expected = torch.diag(x)
self.assertEqual(result, expected)
x = torch.randn((100, 100), device=device)
result = torch.diagonal(x, 17)
expected = torch.diag(x, 17)
self.assertEqual(result, expected)
@onlyCPU
@dtypes(torch.float)
def test_diagonal_multidim(self, device, dtype):
x = torch.randn(10, 11, 12, 13, dtype=dtype, device=device)
xn = x.numpy()
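        # args are (offset[, dim1, dim2]) combinations, including negative offsets and dims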
for args in [(2, 2, 3), (2,), (-2, 1, 2), (0, -2, -1)]:
result = torch.diagonal(x, *args)
expected = xn.diagonal(*args)
self.assertEqual(expected.shape, result.shape)
self.assertEqual(expected, result)
        # test non-contiguous
xp = x.permute(1, 2, 3, 0)
result = torch.diagonal(xp, 0, -2, -1)
expected = xp.numpy().diagonal(0, -2, -1)
self.assertEqual(expected.shape, result.shape)
self.assertEqual(expected, result)
@onlyNativeDeviceTypes
@dtypes(*all_types())
@dtypesIfCUDA(*all_types_and(torch.half))
def test_trace(self, device, dtype):
def test(shape):
tensor = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9)
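            # torch.trace promotes like sum(), so the NumPy reference accumulates in the same dtype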
expected_dtype = tensor.sum().dtype
expected_dtype = torch_to_numpy_dtype_dict[expected_dtype]
result = np.trace(tensor.cpu().numpy(), dtype=expected_dtype)
expected = torch.tensor(result, device=device)
self.assertEqual(tensor.trace(), expected)
shapes = (
[10, 1],
[1, 10],
[100, 100],
[20, 100],
[100, 20],
)
for shape in shapes:
test(shape)
def generate_clamp_baseline(self, device, dtype, *, min_vals, max_vals, with_nans):
"""
Creates a random tensor for a given device and dtype, and computes the expected clamped
values given the min_vals and/or max_vals.
        If with_nans is True, some values are randomly set to nan.
"""
X = torch.rand(100, device=device).mul(50).add(-25) # uniform in [-25, 25]
X = X.to(dtype)
if with_nans:
mask = torch.randint(0, 2, X.shape, dtype=torch.bool, device=device)
X[mask] = nan
if isinstance(min_vals, torch.Tensor):
min_vals = min_vals.cpu().numpy()
if isinstance(max_vals, torch.Tensor):
max_vals = max_vals.cpu().numpy()
# Use NumPy implementation as reference
X_clamped = torch.tensor(
np.clip(X.cpu().numpy(), a_min=min_vals, a_max=max_vals), device=device
)
return X, X_clamped
# Tests clamp and its alias, clip
@dtypes(torch.int64, torch.float32)
def test_clamp(self, device, dtype):
op_list = (
torch.clamp,
torch.Tensor.clamp,
torch.Tensor.clamp_,
torch.clip,
torch.Tensor.clip,
torch.Tensor.clip_,
)
# min/max argument product
args = product((-10, None), (10, None))
for op in op_list:
for min_val, max_val in args:
if min_val is None and max_val is None:
continue
X, Y_expected = self.generate_clamp_baseline(
device, dtype, min_vals=min_val, max_vals=max_val, with_nans=False
)
# Test op
X1 = X.clone() # So that the in-place ops do not change X
Y_actual = op(X1, min_val, max_val)
self.assertEqual(Y_expected, Y_actual)
# Test op-out behavior (out does not exist for method versions)
if op in (torch.clamp, torch.clip):
Y_out = torch.empty_like(X)
op(X, min=min_val, max=max_val, out=Y_out)
self.assertEqual(Y_expected, Y_out)
def test_clamp_propagates_nans(self, device):
op_list = (
torch.clamp,
torch.Tensor.clamp,
torch.Tensor.clamp_,
torch.clip,
torch.Tensor.clip,
torch.Tensor.clip_,
)
# min/max argument product
args = product((-10, None), (10, None))
for op in op_list:
for min_val, max_val in args:
if min_val is None and max_val is None:
continue
X, Y_expected = self.generate_clamp_baseline(
device,
torch.float,
min_vals=min_val,
max_vals=max_val,
with_nans=True,
)
Y_expected = torch.isnan(Y_expected)
# Test op
X1 = X.clone() # So that the in-place ops do not change X
Y_actual = op(X1, min_val, max_val)
self.assertEqual(Y_expected, torch.isnan(Y_actual))
# Test op-out behavior (out does not exist for method versions)
if op in (torch.clamp, torch.clip):
Y_out = torch.empty_like(X)
op(X, min_val, max_val, out=Y_out)
self.assertEqual(Y_expected, torch.isnan(Y_out))
def test_clamp_raises_arg_errors(self, device):
X = torch.randn(100, dtype=torch.float, device=device)
error_msg = "At least one of 'min' or 'max' must not be None"
with self.assertRaisesRegex(RuntimeError, error_msg):
X.clamp()
with self.assertRaisesRegex(RuntimeError, error_msg):
X.clamp_()
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.clamp(X)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip(self, device, dtype):
make_from_data = partial(torch.tensor, device=device, dtype=dtype)
make_from_size = partial(make_tensor, device=device, dtype=dtype)
def test_flip_impl(input_t, dims, output_t):
def all_t():
yield input_t, output_t
if dtype is torch.float:
# We generate quantized versions as well
for qdtype in (torch.quint8, torch.qint8, torch.qint32):
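                        # arbitrary scale/zero_point; flipping a quantized tensor should match quantizing the flipped input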
qinput_t = torch.quantize_per_tensor(input_t, 0.1, 5, qdtype)
qoutput_t = torch.quantize_per_tensor(output_t, 0.1, 5, qdtype)
yield qinput_t, qoutput_t
for in_t, out_t in all_t():
self.assertEqual(in_t.flip(dims), out_t)
n = in_t.ndim
if not isinstance(dims, tuple):
# Wrap dim
self.assertEqual(in_t.flip(-n + dims), out_t)
else:
# Permute dimensions
for p_dims in permutations(dims):
self.assertEqual(in_t.flip(p_dims), out_t)
if len(p_dims) > 0:
# Wrap 1st dim
self.assertEqual(
in_t.flip((-n + p_dims[0],) + p_dims[1:]), out_t
)
def gen_data():
# Basic tests
data = make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2)
nonctg = make_from_size((2, 2, 2), noncontiguous=True).copy_(data)
dims_result = (
(0, make_from_data([5, 6, 7, 8, 1, 2, 3, 4]).view(2, 2, 2)),
(1, make_from_data([3, 4, 1, 2, 7, 8, 5, 6]).view(2, 2, 2)),
(2, make_from_data([2, 1, 4, 3, 6, 5, 8, 7]).view(2, 2, 2)),
((0, 1), make_from_data([7, 8, 5, 6, 3, 4, 1, 2]).view(2, 2, 2)),
((0, 1, 2), make_from_data([8, 7, 6, 5, 4, 3, 2, 1]).view(2, 2, 2)),
)
for in_tensor, (dims, out_tensor) in product((data, nonctg), dims_result):
yield in_tensor, dims, out_tensor
# Expanded
in_t = make_from_data([1, 2, 3]).view(3, 1).expand(3, 2)
dims = 0
out_t = make_from_data([3, 3, 2, 2, 1, 1]).view(3, 2)
yield in_t, dims, out_t
# Noop on expanded dimension
yield in_t, 1, in_t
# Transposed
in_t = (
make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2).transpose(0, 1)
)
dims = (0, 1, 2)
out_t = make_from_data([8, 7, 4, 3, 6, 5, 2, 1]).view(2, 2, 2)
yield in_t, dims, out_t
# Rectangular case
in_t = make_from_data([1, 2, 3, 4, 5, 6]).view(2, 3)
dims = 0
out_t = make_from_data([[4, 5, 6], [1, 2, 3]])
yield in_t, dims, out_t
dims = 1
out_t = make_from_data([[3, 2, 1], [6, 5, 4]])
yield in_t, dims, out_t
# vectorized NCHW cases (images)
if device == "cpu" and dtype != torch.bfloat16:
for mf in [torch.contiguous_format, torch.channels_last]:
for c in [2, 3, 8, 16]:
in_t = make_from_size((2, c, 32, 32)).contiguous(
memory_format=mf
)
np_in_t = in_t.numpy()
np_out_t = np_in_t[:, :, :, ::-1].copy()
out_t = torch.from_numpy(np_out_t)
yield in_t, 3, out_t
np_out_t = np_in_t[:, :, ::-1, :].copy()
out_t = torch.from_numpy(np_out_t)
yield in_t, 2, out_t
# non-contig cases
in_tt = in_t[..., ::2, :]
np_in_t = in_tt.numpy()
np_out_t = np_in_t[:, :, :, ::-1].copy()
out_t = torch.from_numpy(np_out_t)
yield in_tt, 3, out_t
in_tt = in_t[..., ::2]
np_in_t = in_tt.numpy()
np_out_t = np_in_t[:, :, :, ::-1].copy()
out_t = torch.from_numpy(np_out_t)
yield in_tt, 3, out_t
# Noops (edge cases)
# Size 0
in_t = make_from_data(())
yield in_t, 0, in_t
yield in_t, (), in_t
# dims = ()
in_t = make_from_size((3, 2, 1))
yield in_t, (), in_t
# Zero elements, non-zero size
in_t = make_from_size((3, 0, 2))
for i in range(in_t.ndim):
yield in_t, i, in_t
# Size 1
in_t = make_from_size(())
yield in_t, 0, in_t
in_t = make_from_size((1,))
yield in_t, 0, in_t
for in_tensor, dims, out_tensor in gen_data():
test_flip_impl(in_tensor, dims, out_tensor)
# test for shape
size = [2, 3, 4]
data = make_from_size(size)
possible_dims = range(len(size))
test_dims = chain(
combinations(possible_dims, 1), combinations(possible_dims, 2)
)
for dims in test_dims:
self.assertEqual(size, list(data.flip(dims).size()))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip_errors(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
data = make_arg((2, 2, 2))
        # flipping the same dim more than once is not allowed
self.assertRaises(RuntimeError, lambda: data.flip(0, 1, 1))
        # calling flip with no dims is not allowed
self.assertRaises(TypeError, lambda: data.flip())
        # dims beyond the tensor's dimensionality are not allowed
self.assertRaises(IndexError, lambda: data.flip(0, 1, 2, 3))
self.assertRaises(IndexError, lambda: data.flip(3))
def _rand_shape(self, dim, min_size, max_size):
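        # draw each dimension size uniformly from [min_size, max_size]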
return tuple(torch.randint(min_size, max_size + 1, (dim,)))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip_numpy(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
for ndim in [3, 4]:
shape = self._rand_shape(ndim, 5, 10)
data = make_arg(shape)
# Axis to sample for given shape.
for i in range(1, ndim + 1):
# Check all combinations of `i` axis.
for flip_dim in combinations(range(ndim), i):
torch_fn = partial(torch.flip, dims=flip_dim)
np_fn = partial(np.flip, axis=flip_dim)
self.compare_with_numpy(torch_fn, np_fn, data)
@onlyCUDA # CPU is too slow
@largeTensorTest("17GB") # 4 tensors of 4GB (in, out) x (torch, numpy) + 1GB
@largeTensorTest(
"81GB", "cpu"
) # even for CUDA test, sufficient system memory is required
@unittest.skipIf(IS_JETSON, "Too large for Jetson")
def test_flip_large_tensor(self, device):
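        # over 2**32 elements, so flip must index beyond 32-bit offsets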
t_in = torch.empty(2**32 + 1, dtype=torch.uint8).random_()
torch_fn = partial(torch.flip, dims=(0,))
np_fn = partial(np.flip, axis=0)
self.compare_with_numpy(torch_fn, np_fn, t_in)
del t_in
def _test_fliplr_flipud(self, torch_fn, np_fn, min_dim, max_dim, device, dtype):
for dim in range(min_dim, max_dim + 1):
shape = self._rand_shape(dim, 5, 10)
# Randomly scale the input
if dtype.is_floating_point or dtype.is_complex:
data = torch.randn(*shape, device=device, dtype=dtype)
else:
data = torch.randint(0, 10, shape, device=device, dtype=dtype)
self.compare_with_numpy(torch_fn, np_fn, data)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_fliplr(self, device, dtype):
self._test_fliplr_flipud(torch.fliplr, np.fliplr, 2, 4, device, dtype)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_fliplr_invalid(self, device, dtype):
x = torch.randn(42).to(dtype)
with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
torch.fliplr(x)
with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
torch.fliplr(torch.tensor(42, device=device, dtype=dtype))
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_flipud(self, device, dtype):
self._test_fliplr_flipud(torch.flipud, np.flipud, 1, 4, device, dtype)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_flipud_invalid(self, device, dtype):
with self.assertRaisesRegex(RuntimeError, "Input must be >= 1-d."):
torch.flipud(torch.tensor(42, device=device, dtype=dtype))
def test_rot90(self, device):
data = torch.arange(1, 5, device=device).view(2, 2)
self.assertEqual(torch.tensor([1, 2, 3, 4]).view(2, 2), data.rot90(0, [0, 1]))
self.assertEqual(torch.tensor([2, 4, 1, 3]).view(2, 2), data.rot90(1, [0, 1]))
self.assertEqual(torch.tensor([4, 3, 2, 1]).view(2, 2), data.rot90(2, [0, 1]))
self.assertEqual(torch.tensor([3, 1, 4, 2]).view(2, 2), data.rot90(3, [0, 1]))
# test for default args k=1, dims=[0, 1]
self.assertEqual(data.rot90(), data.rot90(1, [0, 1]))
# test for reversed order of dims
self.assertEqual(data.rot90(3, [0, 1]), data.rot90(1, [1, 0]))
# test for modulo of k
self.assertEqual(data.rot90(5, [0, 1]), data.rot90(1, [0, 1]))
self.assertEqual(data.rot90(3, [0, 1]), data.rot90(-1, [0, 1]))
self.assertEqual(data.rot90(-5, [0, 1]), data.rot90(-1, [0, 1]))
# test for dims out-of-range error
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, -3]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 2]))
# test tensor with more than 2D
data = torch.arange(1, 9, device=device).view(2, 2, 2)
self.assertEqual(
torch.tensor([2, 4, 1, 3, 6, 8, 5, 7]).view(2, 2, 2), data.rot90(1, [1, 2])
)
self.assertEqual(data.rot90(1, [1, -1]), data.rot90(1, [1, 2]))
# test for errors
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 3]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [1, 1]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 1, 2]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0]))
@skipIfTorchDynamo("TorchDynamo fails with an unknown error")
@dtypes(torch.cfloat, torch.cdouble)
def test_complex_rot90(self, device, dtype):
shape = self._rand_shape(random.randint(2, 4), 5, 10)
for rot_times in range(4):
data = torch.randn(*shape, device=device, dtype=dtype)
torch_fn = partial(torch.rot90, k=rot_times, dims=[0, 1])
np_fn = partial(np.rot90, k=rot_times, axes=[0, 1])
self.compare_with_numpy(torch_fn, np_fn, data)
# TODO: update once warning flag is available to always trigger ONCE warnings
# Ensures nonzero does not throw a warning, even when the as_tuple argument
# is not provided
def test_nonzero_no_warning(self, device):
t = torch.randn((2, 2), device=device)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
torch.nonzero(t)
t.nonzero()
self.assertEqual(len(w), 0)
@dtypes(*all_types_and(torch.half, torch.bool, torch.bfloat16))
def test_nonzero(self, device, dtype):
shapes = [
torch.Size((12,)),
torch.Size((12, 1)),
torch.Size((1, 12)),
torch.Size((6, 2)),
torch.Size((3, 2, 2)),
torch.Size((5, 5, 5)),
]
def gen_nontrivial_input(shape, dtype, device):
if dtype != torch.bfloat16:
return torch.randint(2, shape, device=device, dtype=dtype)
else:
                # bfloat16 random generation does not work on Windows
return torch.randint(2, shape, device=device, dtype=torch.float).to(
dtype
)
for shape in shapes:
tensor = gen_nontrivial_input(shape, dtype, device)
dst1 = torch.nonzero(tensor, as_tuple=False)
dst2 = tensor.nonzero(as_tuple=False)
dst3 = torch.empty([], dtype=torch.long, device=device)
torch.nonzero(tensor, out=dst3)
if self.device_type != "xla":
# xla does not raise runtime error
self.assertRaisesRegex(
RuntimeError,
"scalar type Long",
lambda: torch.nonzero(
tensor, out=torch.empty([], dtype=torch.float, device=device)
),
)
if (
self.device_type == "cuda"
or self.device_type == TEST_PRIVATEUSE1_DEVICE_TYPE
):
self.assertRaisesRegex(
RuntimeError,
"on the same device",
lambda: torch.nonzero(
tensor, out=torch.empty([], dtype=torch.long)
),
)
np_array = (
tensor.cpu().numpy()
if dtype != torch.bfloat16
else tensor.float().cpu().numpy()
)
np_result = torch.from_numpy(np.stack(np_array.nonzero())).t()
self.assertEqual(dst1.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst2.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst3.cpu(), np_result, atol=0, rtol=0)
tup1 = torch.nonzero(tensor, as_tuple=True)
tup2 = tensor.nonzero(as_tuple=True)
tup1 = torch.stack(tup1).t().cpu()
tup2 = torch.stack(tup2).t().cpu()
self.assertEqual(tup1, np_result, atol=0, rtol=0)
self.assertEqual(tup2, np_result, atol=0, rtol=0)
def test_nonzero_astuple_out(self, device):
t = torch.randn((3, 3, 3), device=device)
out = torch.empty_like(t, dtype=torch.long)
with self.assertRaises(RuntimeError):
torch.nonzero(t, as_tuple=True, out=out)
self.assertEqual(
torch.nonzero(t, as_tuple=False, out=out), torch.nonzero(t, out=out)
)
# Verifies that JIT script cannot handle the as_tuple kwarg
# See Issue https://github.com/pytorch/pytorch/issues/45499.
def _foo(t):
tuple_result = torch.nonzero(t, as_tuple=True)
nontuple_result = torch.nonzero(t, as_tuple=False)
out = torch.empty_like(nontuple_result)
torch.nonzero(t, as_tuple=False, out=out)
return tuple_result, nontuple_result, out
with self.assertRaises(RuntimeError):
scripted_foo = torch.jit.script(_foo)
# Verifies that JIT tracing works fine
traced_foo = torch.jit.trace(_foo, t)
traced_tuple, traced_nontuple, traced_out = traced_foo(t)
expected_tuple = torch.nonzero(t, as_tuple=True)
expected_nontuple = torch.nonzero(t)
self.assertEqual(traced_tuple, expected_tuple)
self.assertEqual(traced_nontuple, expected_nontuple)
self.assertEqual(traced_out, expected_nontuple)
@onlyNativeDeviceTypes
def test_nonzero_discontiguous(self, device):
shape = (4, 4)
tensor = torch.randint(2, shape, device=device)
tensor_nc = torch.empty(shape[0], shape[1] * 2, device=device)[:, ::2].copy_(
tensor
)
dst1 = tensor.nonzero(as_tuple=False)
dst2 = tensor_nc.nonzero(as_tuple=False)
self.assertEqual(dst1, dst2, atol=0, rtol=0)
dst3 = torch.empty_like(dst1)
data_ptr = dst3.data_ptr()
# expect dst3 storage to be reused
torch.nonzero(tensor, out=dst3)
self.assertEqual(data_ptr, dst3.data_ptr())
self.assertEqual(dst1, dst3, atol=0, rtol=0)
# discontiguous out
dst4 = torch.empty(
dst1.size(0), dst1.size(1) * 2, dtype=torch.long, device=device
)[:, ::2]
data_ptr = dst4.data_ptr()
strides = dst4.stride()
torch.nonzero(tensor, out=dst4)
self.assertEqual(data_ptr, dst4.data_ptr())
self.assertEqual(dst1, dst4, atol=0, rtol=0)
self.assertEqual(strides, dst4.stride())
def test_nonzero_non_diff(self, device):
x = torch.randn(10, requires_grad=True)
nz = x.nonzero()
self.assertFalse(nz.requires_grad)
@dtypes(torch.int64, torch.float, torch.complex128)
def test_sparse_dense_dim(self, device, dtype):
for shape in [(), (2,), (2, 3)]:
if dtype.is_complex or dtype.is_floating_point:
x = torch.rand(shape, device=device, dtype=dtype)
else:
x = torch.randint(-9, 9, shape, device=device, dtype=dtype)
self.assertEqual(x.sparse_dim(), 0)
self.assertEqual(x.dense_dim(), len(shape))
instantiate_device_type_tests(TestShapeOps, globals())
if __name__ == "__main__":
run_tests()
|
import torch
import numpy as np
from itertools import product, combinations, permutations, chain
from functools import partial
import random
import warnings
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfTorchDynamo, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyNativeDeviceTypes,
dtypesIfCUDA, largeTensorTest)
from torch.testing._internal.common_dtype import all_types_and_complex_and, all_types, all_types_and
|
import random
import unittest
import warnings
from functools import partial
from itertools import chain, combinations, permutations, product
import numpy as np
import torch
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCUDA,
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
)
from torch.testing._internal.common_dtype import (
all_types,
all_types_and,
all_types_and_complex_and,
)
from torch.testing._internal.common_utils import (
IS_JETSON,
run_tests,
skipIfTorchDynamo,
TEST_PRIVATEUSE1_DEVICE_TYPE,
TestCase,
torch_to_numpy_dtype_dict,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sort_and_select.py
|
compare
|
def compare(t, k, dim, dir):
topKVal, topKInd = t.topk(k, dim, dir, True)
sortKVal, sortKInd = topKViaSort(t, k, dim, dir)
compareTensors(t, sortKVal, sortKInd, topKVal, topKInd, dim)
t = torch.rand(random.randint(1, SIZE),
random.randint(1, SIZE),
random.randint(1, SIZE), device=device)
for _kTries in range(3):
for _dimTries in range(3):
for transpose in (True, False):
for dir in (True, False):
testTensor = t
if transpose:
dim1 = random.randrange(t.ndimension())
dim2 = dim1
while dim1 == dim2:
dim2 = random.randrange(t.ndimension())
testTensor = t.transpose(dim1, dim2)
dim = random.randrange(testTensor.ndimension())
k = random.randint(1, testTensor.size(dim))
compare(testTensor, k, dim, dir)
# This tests the code path where on CUDA, topk is implemented with sort.
t = torch.randn((2, 100000), device=device)
compare(t, 2000, 1, True)
compare(t, 2000, 1, False)
# This tests the code path where on CUDA, topk is implemented with multiblock
t = torch.randn((2, 10000), device=device)
compare(t, 2000, 1, True)
compare(t, 2000, 1, False)
|
def compare(t, k, dim, dir):
topKVal, topKInd = t.topk(k, dim, dir, True)
sortKVal, sortKInd = topKViaSort(t, k, dim, dir)
compareTensors(t, sortKVal, sortKInd, topKVal, topKInd, dim)
SIZE = 100
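# cross-check topk against the sort-based reference over random shapes, dims, and directions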
t = torch.rand(
random.randint(1, SIZE),
random.randint(1, SIZE),
random.randint(1, SIZE),
device=device,
)
for _kTries in range(3):
for _dimTries in range(3):
for transpose in (True, False):
for dir in (True, False):
testTensor = t
if transpose:
dim1 = random.randrange(t.ndimension())
dim2 = dim1
while dim1 == dim2:
dim2 = random.randrange(t.ndimension())
testTensor = t.transpose(dim1, dim2)
dim = random.randrange(testTensor.ndimension())
k = random.randint(1, testTensor.size(dim))
compare(testTensor, k, dim, dir)
# This tests the code path where on CUDA, topk is implemented with sort.
t = torch.randn((2, 100000), device=device)
compare(t, 2000, 1, True)
compare(t, 2000, 1, False)
# This tests the code path where on CUDA, topk is implemented with multiblock
t = torch.randn((2, 10000), device=device)
compare(t, 2000, 1, True)
compare(t, 2000, 1, False)
|
import torch
import numpy as np
import random
from torch import nan
from itertools import permutations, product
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import all_types, all_types_and, floating_types_and
from torch.testing._internal.common_utils import \
(TestCase, run_tests, slowTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, onlyNativeDeviceTypes,
onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
SIZE = 100
from itertools import chain, combinations
|
import random
from itertools import permutations, product
import numpy as np
import torch
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCPU,
dtypesIfCUDA,
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
)
from torch.testing._internal.common_dtype import (
all_types,
all_types_and,
floating_types_and,
integral_types,
)
from torch.testing._internal.common_utils import (
run_tests,
skipIfTorchDynamo,
slowTest,
TestCase,
)
from itertools import chain, combinations
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sort_and_select.py
|
test_topk_quantized_scalar_input
|
def test_topk_quantized_scalar_input(self):
# Calling topk on a quantized scalar input used to segfault,
# see https://github.com/pytorch/pytorch/issues/116324
x = torch.quantize_per_tensor(torch.randn(()), 0.1, 10, torch.qint8)
x.topk(1)
|
import random
from itertools import permutations, product
import numpy as np
import torch
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCPU,
dtypesIfCUDA,
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
)
from torch.testing._internal.common_dtype import (
all_types,
all_types_and,
floating_types_and,
integral_types,
)
from torch.testing._internal.common_utils import (
run_tests,
skipIfTorchDynamo,
slowTest,
TestCase,
)
class TestSortAndSelect(TestCase):
from itertools import chain, combinations
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_sort_and_select.py
|
run_test
|
def run_test(device, dtype):
x = torch.tensor([[[1., 1.],
[0., 1.],
[2., 1.],
[0., 1.]],
[[1., 1.],
[0., 1.],
[2., 1.],
[0., 1.]]],
dtype=dtype,
device=device)
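    # empty and ill-formed shapes; unique along a dim is expected to raise on the ill-formed ones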
x_empty = torch.empty(5, 0, dtype=dtype, device=device)
x_ill_formed_empty = torch.empty(5, 0, 0, dtype=dtype, device=device)
x_ill_formed_empty_another = torch.empty(5, 0, 5, dtype=dtype, device=device)
if dtype in floating_types_and(torch.float16, torch.bfloat16):
x_nan = torch.tensor([float("nan"), 0, 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
expected_unique_dim0 = torch.tensor([[[1., 1.],
[0., 1.],
[2., 1.],
[0., 1.]]],
dtype=dtype,
device=device)
expected_inverse_dim0 = torch.tensor([0, 0])
expected_counts_dim0 = torch.tensor([2])
expected_unique_dim1 = torch.tensor([[[0., 1.],
[1., 1.],
[2., 1.]],
[[0., 1.],
[1., 1.],
[2., 1.]]],
dtype=dtype,
device=device)
expected_unique_dim1_bool = torch.tensor([[[False, True], [True, True]],
[[False, True], [True, True]]],
dtype=torch.bool,
device=device)
expected_inverse_dim1 = torch.tensor([1, 0, 2, 0])
expected_inverse_dim1_bool = torch.tensor([1, 0, 1, 0])
expected_counts_dim1 = torch.tensor([2, 1, 1])
expected_counts_dim1_bool = torch.tensor([2, 2])
expected_unique_dim2 = torch.tensor([[[1., 1.],
[0., 1.],
[2., 1.],
[0., 1.]],
[[1., 1.],
[0., 1.],
[2., 1.],
[0., 1.]]],
dtype=dtype,
device=device)
expected_inverse_dim2 = torch.tensor([0, 1])
expected_counts_dim2 = torch.tensor([1, 1])
expected_unique_empty = torch.empty(5, 0, dtype=dtype, device=device)
expected_inverse_empty = torch.tensor([], dtype=torch.long, device=device)
expected_counts_empty = torch.tensor([], dtype=torch.long, device=device)
if dtype in floating_types_and(torch.float16, torch.bfloat16):
expected_unique_nan = torch.tensor([float("nan"), 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
expected_inverse_nan = torch.tensor([0, 1, 1, 2, 3, 4], dtype=torch.long, device=device)
expected_counts_nan = torch.tensor([1, 2, 1, 1, 1], dtype=torch.long, device=device)
# dim0
x_unique = torch.unique(x, dim=0)
self.assertEqual(expected_unique_dim0, x_unique)
x_unique, x_inverse = torch.unique(
x,
return_inverse=True,
dim=0)
self.assertEqual(expected_unique_dim0, x_unique)
self.assertEqual(expected_inverse_dim0, x_inverse)
x_unique, x_counts = torch.unique(
x,
return_inverse=False,
return_counts=True,
dim=0)
self.assertEqual(expected_unique_dim0, x_unique)
self.assertEqual(expected_counts_dim0, x_counts)
x_unique, x_inverse, x_counts = torch.unique(
x,
return_inverse=True,
return_counts=True,
dim=0)
self.assertEqual(expected_unique_dim0, x_unique)
self.assertEqual(expected_inverse_dim0, x_inverse)
self.assertEqual(expected_counts_dim0, x_counts)
# dim1
x_unique = torch.unique(x, dim=1)
if x.dtype == torch.bool:
self.assertEqual(expected_unique_dim1_bool, x_unique)
else:
self.assertEqual(expected_unique_dim1, x_unique)
x_unique, x_inverse = torch.unique(
x,
return_inverse=True,
dim=1)
if x.dtype == torch.bool:
self.assertEqual(expected_unique_dim1_bool, x_unique)
self.assertEqual(expected_inverse_dim1_bool, x_inverse)
else:
self.assertEqual(expected_unique_dim1, x_unique)
self.assertEqual(expected_inverse_dim1, x_inverse)
x_unique, x_counts = torch.unique(
x,
return_inverse=False,
return_counts=True,
dim=1)
if x.dtype == torch.bool:
self.assertEqual(expected_unique_dim1_bool, x_unique)
self.assertEqual(expected_counts_dim1_bool, x_counts)
else:
self.assertEqual(expected_unique_dim1, x_unique)
self.assertEqual(expected_counts_dim1, x_counts)
x_unique, x_inverse, x_counts = torch.unique(
x,
return_inverse=True,
return_counts=True,
dim=1)
if x.dtype == torch.bool:
self.assertEqual(expected_unique_dim1_bool, x_unique)
self.assertEqual(expected_inverse_dim1_bool, x_inverse)
self.assertEqual(expected_counts_dim1_bool, x_counts)
else:
self.assertEqual(expected_unique_dim1, x_unique)
self.assertEqual(expected_inverse_dim1, x_inverse)
self.assertEqual(expected_counts_dim1, x_counts)
# dim2
x_unique = torch.unique(x, dim=2)
self.assertEqual(expected_unique_dim2, x_unique)
x_unique, x_inverse = torch.unique(
x,
return_inverse=True,
dim=2)
self.assertEqual(expected_unique_dim2, x_unique)
self.assertEqual(expected_inverse_dim2, x_inverse)
x_unique, x_counts = torch.unique(
x,
return_inverse=False,
return_counts=True,
dim=2)
self.assertEqual(expected_unique_dim2, x_unique)
self.assertEqual(expected_counts_dim2, x_counts)
x_unique, x_inverse, x_counts = torch.unique(
x,
return_inverse=True,
return_counts=True,
dim=2)
self.assertEqual(expected_unique_dim2, x_unique)
self.assertEqual(expected_inverse_dim2, x_inverse)
self.assertEqual(expected_counts_dim2, x_counts)
# test empty tensor
x_unique, x_inverse, x_counts = torch.unique(
x_empty,
return_inverse=True,
return_counts=True,
dim=1)
self.assertEqual(expected_unique_empty, x_unique)
self.assertEqual(expected_inverse_empty, x_inverse)
self.assertEqual(expected_counts_empty, x_counts)
# test tensor with nan
if dtype in floating_types_and(torch.float16, torch.bfloat16):
x_unique, x_inverse, x_counts = torch.unique(
x_nan,
return_inverse=True,
return_counts=True,
dim=0)
self.assertEqual(expected_unique_nan, x_unique)
self.assertEqual(expected_inverse_nan, x_inverse)
self.assertEqual(expected_counts_nan, x_counts)
# test not a well formed tensor
# Checking for runtime error, as this is the expected behaviour
with self.assertRaises(RuntimeError):
torch.unique(
x_ill_formed_empty,
return_inverse=True,
return_counts=True,
dim=1)
# test along dim2
with self.assertRaises(RuntimeError):
torch.unique(
x_ill_formed_empty_another,
return_inverse=True,
return_counts=True,
dim=2)
# test consecutive version
y = torch.tensor(
[[0, 1],
[0, 1],
[0, 1],
[1, 2],
[1, 2],
[3, 4],
[0, 1],
[0, 1],
[3, 4],
[1, 2]],
dtype=dtype,
device=device
)
# test tensor with nan
if dtype in floating_types_and(torch.float16, torch.bfloat16):
y_nan = torch.tensor([float("nan"), 0, 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
expected_y_unique = torch.tensor(
[[0, 1],
[1, 2],
[3, 4],
[0, 1],
[3, 4],
[1, 2]],
dtype=dtype,
device=device
)
expected_y_inverse = torch.tensor([0, 0, 0, 1, 1, 2, 3, 3, 4, 5], dtype=torch.int64, device=device)
expected_y_counts = torch.tensor([3, 2, 1, 2, 1, 1], dtype=torch.int64, device=device)
expected_y_inverse_bool = torch.tensor([0, 0, 0, 1, 1, 1, 2, 2, 3, 3], dtype=torch.int64, device=device)
expected_y_counts_bool = torch.tensor([3, 3, 2, 2], dtype=torch.int64, device=device)
if dtype in floating_types_and(torch.float16, torch.bfloat16):
expected_y_unique_nan = torch.tensor([float("nan"), 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
expected_y_inverse_nan = torch.tensor([0, 1, 1, 2, 3, 4], dtype=torch.long, device=device)
expected_y_counts_nan = torch.tensor([1, 2, 1, 1, 1], dtype=torch.long, device=device)
y_unique, y_inverse, y_counts = torch.unique_consecutive(y, return_inverse=True, return_counts=True, dim=0)
if x.dtype == torch.bool:
self.assertEqual(expected_y_inverse_bool, y_inverse)
self.assertEqual(expected_y_counts_bool, y_counts)
else:
self.assertEqual(expected_y_inverse, y_inverse)
self.assertEqual(expected_y_counts, y_counts)
# test tensor with nan
if dtype in floating_types_and(torch.float16, torch.bfloat16):
y_unique, y_inverse, y_counts = torch.unique_consecutive(
y_nan,
return_inverse=True,
return_counts=True,
dim=0)
self.assertEqual(expected_y_unique_nan, y_unique)
self.assertEqual(expected_y_inverse_nan, y_inverse)
self.assertEqual(expected_y_counts_nan, y_counts)
run_test(device, torch.float)
run_test(device, torch.double)
run_test(device, torch.long)
run_test(device, torch.uint8)
run_test(device, torch.bool)
|
def run_test(device, dtype):
x = torch.tensor(
[
[[1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0]],
[[1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0]],
],
dtype=dtype,
device=device,
)
x_empty = torch.empty(5, 0, dtype=dtype, device=device)
x_ill_formed_empty = torch.empty(5, 0, 0, dtype=dtype, device=device)
x_ill_formed_empty_another = torch.empty(
5, 0, 5, dtype=dtype, device=device
)
if dtype in floating_types_and(torch.float16, torch.bfloat16):
x_nan = torch.tensor(
[float("nan"), 0, 0, float("nan"), float("nan"), 1],
dtype=dtype,
device=device,
)
expected_unique_dim0 = torch.tensor(
[[[1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0]]],
dtype=dtype,
device=device,
)
expected_inverse_dim0 = torch.tensor([0, 0])
expected_counts_dim0 = torch.tensor([2])
expected_unique_dim1 = torch.tensor(
[
[[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]],
[[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]],
],
dtype=dtype,
device=device,
)
expected_unique_dim1_bool = torch.tensor(
[[[False, True], [True, True]], [[False, True], [True, True]]],
dtype=torch.bool,
device=device,
)
expected_inverse_dim1 = torch.tensor([1, 0, 2, 0])
expected_inverse_dim1_bool = torch.tensor([1, 0, 1, 0])
expected_counts_dim1 = torch.tensor([2, 1, 1])
expected_counts_dim1_bool = torch.tensor([2, 2])
expected_unique_dim2 = torch.tensor(
[
[[1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0]],
[[1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0]],
],
dtype=dtype,
device=device,
)
expected_inverse_dim2 = torch.tensor([0, 1])
expected_counts_dim2 = torch.tensor([1, 1])
expected_unique_empty = torch.empty(5, 0, dtype=dtype, device=device)
expected_inverse_empty = torch.tensor([], dtype=torch.long, device=device)
expected_counts_empty = torch.tensor([], dtype=torch.long, device=device)
if dtype in floating_types_and(torch.float16, torch.bfloat16):
expected_unique_nan = torch.tensor(
[float("nan"), 0, float("nan"), float("nan"), 1],
dtype=dtype,
device=device,
)
expected_inverse_nan = torch.tensor(
[0, 1, 1, 2, 3, 4], dtype=torch.long, device=device
)
expected_counts_nan = torch.tensor(
[1, 2, 1, 1, 1], dtype=torch.long, device=device
)
# dim0
x_unique = torch.unique(x, dim=0)
self.assertEqual(expected_unique_dim0, x_unique)
x_unique, x_inverse = torch.unique(x, return_inverse=True, dim=0)
self.assertEqual(expected_unique_dim0, x_unique)
self.assertEqual(expected_inverse_dim0, x_inverse)
x_unique, x_counts = torch.unique(
x, return_inverse=False, return_counts=True, dim=0
)
self.assertEqual(expected_unique_dim0, x_unique)
self.assertEqual(expected_counts_dim0, x_counts)
x_unique, x_inverse, x_counts = torch.unique(
x, return_inverse=True, return_counts=True, dim=0
)
self.assertEqual(expected_unique_dim0, x_unique)
self.assertEqual(expected_inverse_dim0, x_inverse)
self.assertEqual(expected_counts_dim0, x_counts)
# dim1
x_unique = torch.unique(x, dim=1)
if x.dtype == torch.bool:
self.assertEqual(expected_unique_dim1_bool, x_unique)
else:
self.assertEqual(expected_unique_dim1, x_unique)
x_unique, x_inverse = torch.unique(x, return_inverse=True, dim=1)
if x.dtype == torch.bool:
self.assertEqual(expected_unique_dim1_bool, x_unique)
self.assertEqual(expected_inverse_dim1_bool, x_inverse)
else:
self.assertEqual(expected_unique_dim1, x_unique)
self.assertEqual(expected_inverse_dim1, x_inverse)
x_unique, x_counts = torch.unique(
x, return_inverse=False, return_counts=True, dim=1
)
if x.dtype == torch.bool:
self.assertEqual(expected_unique_dim1_bool, x_unique)
self.assertEqual(expected_counts_dim1_bool, x_counts)
else:
self.assertEqual(expected_unique_dim1, x_unique)
self.assertEqual(expected_counts_dim1, x_counts)
x_unique, x_inverse, x_counts = torch.unique(
x, return_inverse=True, return_counts=True, dim=1
)
if x.dtype == torch.bool:
self.assertEqual(expected_unique_dim1_bool, x_unique)
self.assertEqual(expected_inverse_dim1_bool, x_inverse)
self.assertEqual(expected_counts_dim1_bool, x_counts)
else:
self.assertEqual(expected_unique_dim1, x_unique)
self.assertEqual(expected_inverse_dim1, x_inverse)
self.assertEqual(expected_counts_dim1, x_counts)
# dim2
x_unique = torch.unique(x, dim=2)
self.assertEqual(expected_unique_dim2, x_unique)
x_unique, x_inverse = torch.unique(x, return_inverse=True, dim=2)
self.assertEqual(expected_unique_dim2, x_unique)
self.assertEqual(expected_inverse_dim2, x_inverse)
x_unique, x_counts = torch.unique(
x, return_inverse=False, return_counts=True, dim=2
)
self.assertEqual(expected_unique_dim2, x_unique)
self.assertEqual(expected_counts_dim2, x_counts)
x_unique, x_inverse, x_counts = torch.unique(
x, return_inverse=True, return_counts=True, dim=2
)
self.assertEqual(expected_unique_dim2, x_unique)
self.assertEqual(expected_inverse_dim2, x_inverse)
self.assertEqual(expected_counts_dim2, x_counts)
# test empty tensor
x_unique, x_inverse, x_counts = torch.unique(
x_empty, return_inverse=True, return_counts=True, dim=1
)
self.assertEqual(expected_unique_empty, x_unique)
self.assertEqual(expected_inverse_empty, x_inverse)
self.assertEqual(expected_counts_empty, x_counts)
# test tensor with nan
if dtype in floating_types_and(torch.float16, torch.bfloat16):
x_unique, x_inverse, x_counts = torch.unique(
x_nan, return_inverse=True, return_counts=True, dim=0
)
self.assertEqual(expected_unique_nan, x_unique)
self.assertEqual(expected_inverse_nan, x_inverse)
self.assertEqual(expected_counts_nan, x_counts)
# test not a well formed tensor
# Checking for runtime error, as this is the expected behaviour
with self.assertRaises(RuntimeError):
torch.unique(
x_ill_formed_empty, return_inverse=True, return_counts=True, dim=1
)
# test along dim2
with self.assertRaises(RuntimeError):
torch.unique(
x_ill_formed_empty_another,
return_inverse=True,
return_counts=True,
dim=2,
)
# test consecutive version
y = torch.tensor(
[
[0, 1],
[0, 1],
[0, 1],
[1, 2],
[1, 2],
[3, 4],
[0, 1],
[0, 1],
[3, 4],
[1, 2],
],
dtype=dtype,
device=device,
)
# test tensor with nan
if dtype in floating_types_and(torch.float16, torch.bfloat16):
y_nan = torch.tensor(
[float("nan"), 0, 0, float("nan"), float("nan"), 1],
dtype=dtype,
device=device,
)
expected_y_unique = torch.tensor(
[[0, 1], [1, 2], [3, 4], [0, 1], [3, 4], [1, 2]],
dtype=dtype,
device=device,
)
expected_y_inverse = torch.tensor(
[0, 0, 0, 1, 1, 2, 3, 3, 4, 5], dtype=torch.int64, device=device
)
expected_y_counts = torch.tensor(
[3, 2, 1, 2, 1, 1], dtype=torch.int64, device=device
)
expected_y_inverse_bool = torch.tensor(
[0, 0, 0, 1, 1, 1, 2, 2, 3, 3], dtype=torch.int64, device=device
)
expected_y_counts_bool = torch.tensor(
[3, 3, 2, 2], dtype=torch.int64, device=device
)
if dtype in floating_types_and(torch.float16, torch.bfloat16):
expected_y_unique_nan = torch.tensor(
[float("nan"), 0, float("nan"), float("nan"), 1],
dtype=dtype,
device=device,
)
expected_y_inverse_nan = torch.tensor(
[0, 1, 1, 2, 3, 4], dtype=torch.long, device=device
)
expected_y_counts_nan = torch.tensor(
[1, 2, 1, 1, 1], dtype=torch.long, device=device
)
y_unique, y_inverse, y_counts = torch.unique_consecutive(
y, return_inverse=True, return_counts=True, dim=0
)
if x.dtype == torch.bool:
self.assertEqual(expected_y_inverse_bool, y_inverse)
self.assertEqual(expected_y_counts_bool, y_counts)
else:
self.assertEqual(expected_y_inverse, y_inverse)
self.assertEqual(expected_y_counts, y_counts)
# test tensor with nan
if dtype in floating_types_and(torch.float16, torch.bfloat16):
y_unique, y_inverse, y_counts = torch.unique_consecutive(
y_nan, return_inverse=True, return_counts=True, dim=0
)
self.assertEqual(expected_y_unique_nan, y_unique)
self.assertEqual(expected_y_inverse_nan, y_inverse)
self.assertEqual(expected_y_counts_nan, y_counts)
# Test dim is sorted same as NumPy with dims >= 3
x = torch.tensor(
[
[
[[1, 0, 1, 0, 1, 1], [0, 1, 1, 0, 1, 1]],
[[0, 1, 1, 0, 0, 1], [0, 0, 0, 1, 0, 0]],
],
[
[[0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 1, 1]],
[[0, 0, 1, 1, 0, 1], [1, 1, 0, 0, 0, 0]],
],
],
dtype=dtype,
device=device,
)
xn = x.cpu().numpy()
for d in range(x.dim()):
t = torch.unique(x, dim=d)
n = np.unique(xn, axis=d)
self.assertEqual(t.cpu().numpy(), n)
run_test(device, torch.float)
run_test(device, torch.double)
run_test(device, torch.long)
run_test(device, torch.uint8)
run_test(device, torch.bool)
|
import torch
import numpy as np
import random
from torch import nan
from itertools import permutations, product
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import all_types, all_types_and, floating_types_and
from torch.testing._internal.common_utils import \
(TestCase, run_tests, slowTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, onlyNativeDeviceTypes,
onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
SIZE = 100
from itertools import chain, combinations
|
import random
from itertools import permutations, product
import numpy as np
import torch
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCPU,
dtypesIfCUDA,
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
)
from torch.testing._internal.common_dtype import (
all_types,
all_types_and,
floating_types_and,
integral_types,
)
from torch.testing._internal.common_utils import (
run_tests,
skipIfTorchDynamo,
slowTest,
TestCase,
)
from itertools import chain, combinations
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sort_and_select.py
|
test_sort_overflow
|
instantiate_device_type_tests(TestSortAndSelect, globals())
if __name__ == '__main__':
run_tests()
|
def test_sort_overflow(self, device, dtype):
"Regression test for https://github.com/pytorch/pytorch/issues/111189"
prev_num_threads = torch.get_num_threads()
try:
low = 0 if dtype == torch.uint8 else -1
x = torch.full((32768,), low, dtype=dtype, device=device)
x[:100] = torch.iinfo(x.dtype).max
torch.set_num_threads(1)
uv = x.sort().values.unique()
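            # only two distinct values go in, so the sorted output should contain exactly both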
self.assertEqual(uv.size(0), 2)
finally:
torch.set_num_threads(prev_num_threads)
|
import random
from itertools import permutations, product
import numpy as np
import torch
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCPU,
dtypesIfCUDA,
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
)
from torch.testing._internal.common_dtype import (
all_types,
all_types_and,
floating_types_and,
integral_types,
)
from torch.testing._internal.common_utils import (
run_tests,
skipIfTorchDynamo,
slowTest,
TestCase,
)
class TestSortAndSelect(TestCase):
from itertools import chain, combinations
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_sparse.py
|
_op_supports_any_sparse
|
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
if TEST_SCIPY:
import scipy.sparse
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# batched grad doesn't support sparse
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
|
def _op_supports_any_sparse(op):
return (op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)
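# lists of OpInfos with some sparse layout support; Python-ref variants are filtered out of the reduction and binary lists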
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
if TEST_SCIPY:
import scipy.sparse
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# batched grad doesn't support sparse
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
import scipy.sparse
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_sparse.py
|
all_sparse_layouts
|
def all_sparse_layouts(test_name='layout', include_strided=False):
return parametrize(test_name, [
subtest(torch.strided, name='Strided'),
subtest(torch.sparse_coo, name='SparseCOO'),
subtest(torch.sparse_csr, name='SparseCSR'),
subtest(torch.sparse_csc, name='SparseCSC'),
subtest(torch.sparse_bsr, name='SparseBSR'),
subtest(torch.sparse_bsc, name='SparseBSC'),
][(0 if include_strided else 1):])
class CrossRefSparseFakeMode(torch._subclasses.CrossRefFakeMode):
def __init__(self):
super().__init__(
self.ignore_op, check_strides=False,
check_aliasing=False,
) # TODO: enable stride/alias checking
# empty_like excluded for now due to sparse complex
# aten._to_dense.default this one is getting called with csc
@staticmethod
def ignore_op(func):
return func in (
torch.ops.aten.empty_like.default,
torch.ops.aten.set_.source_Storage_storage_offset,
torch.ops.aten.sspaddmm.out,
torch.ops.aten._spdiags.default,
torch.ops.aten._to_dense.default,
torch.ops.aten.indices.default,
torch.ops.aten._indices.default,
torch.ops.aten.values.default,
torch.ops.aten._values.default,
)
class TestSparseBase(TestCase):
def run(self, result=None):
if TEST_WITH_CROSSREF:
with CrossRefSparseFakeMode():
return super().run(result)
else:
return super().run(result)
class TestSparse(TestSparseBase):
def setUp(self):
TestCase.setUp(self)
self.index_tensor = lambda *args, **kwargs: torch.tensor(*args, **kwargs, dtype=torch.int64)
def sparse_empty_factory(*args, **kwargs):
kwargs['layout'] = kwargs.get('layout', torch.sparse_coo)
return torch.empty(*args, **kwargs)
self.sparse_empty = sparse_empty_factory
def sparse_tensor_factory(*args, **kwargs):
return torch.sparse_coo_tensor(*args, **kwargs)
self.sparse_tensor = sparse_tensor_factory
self.legacy_sparse_tensor = torch.sparse.DoubleTensor
def _gen_sparse(self, sparse_dim, nnz, with_size, dtype, device, coalesced):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dim
x, i, v = self.genSparseTensor(with_size, sparse_dim, nnz, not coalesced, dtype=dtype, device=device)
if not coalesced:
self.assert_uncoalesced(x)
return x, i, v
def assert_uncoalesced(self, x):
"""
Test if a CPU tensor is uncoalesced. This is used to ensure
correctness of the uncoalesced tensor generation algorithm.
"""
assert not x.is_coalesced()
existing_indices = set()
for i in range(x._nnz()):
index = str(x._indices()[:, i])
if index in existing_indices:
return True
else:
existing_indices.add(index)
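# Illustrative sketch (an assumption-labeled example, not from the original
# file): a COO tensor built with repeated indices is uncoalesced, and
# coalesce() merges the duplicates by summing their values.
#
#     i = torch.tensor([[0, 0, 1]])
#     v = torch.tensor([1., 2., 3.])
#     s = torch.sparse_coo_tensor(i, v, (2,))  # nnz == 3, index 0 repeated
#     c = s.coalesce()                         # nnz == 2, values [3., 3.]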
def randn(self, *args, **kwargs):
"""
Variant of torch.randn that also works in the TEST_CUDA case.
"""
# TODO: Put this in torch.cuda.randn
return torch.empty(*args, **kwargs).normal_()
@dtypes(torch.double)
def test_print_coalesced(self, device, dtype):
self._test_print(device, dtype, True)
@dtypes(torch.double)
def test_print_uncoalesced(self, device, dtype):
self._test_print(device, dtype, False)
def _test_print(self, device, dtype, coalesced):
shape_sparse_dim_nnz = [
((), 0, 2),
((0,), 0, 10),
((2,), 0, 3),
((100, 3), 1, 3),
((100, 20, 3), 2, 0),
((10, 0, 3), 0, 3),
((10, 0, 3), 0, 0),
]
printed = []
for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
indices_shape = torch.Size((sparse_dim, nnz))
values_shape = torch.Size((nnz,) + shape[sparse_dim:])
printed.append("# shape: {}".format(torch.Size(shape)))
printed.append("# nnz: {}".format(nnz))
printed.append("# sparse_dim: {}".format(sparse_dim))
printed.append("# indices shape: {}".format(indices_shape))
printed.append("# values shape: {}".format(values_shape))
indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
device=device).view(indices_shape)
for d in range(sparse_dim):
indices[d].clamp_(max=(shape[d] - 1)) # make it valid index
if not coalesced and indices.numel() > 0:
indices[:, -1] = indices[:, 0] # make it uncoalesced
values_numel = values_shape.numel()
values = torch.arange(values_numel, dtype=dtype,
device=device).view(values_shape).div_(values_numel / 2.)
sp_tensor = self.sparse_tensor(indices, values, shape, dtype=dtype, device=device)
dtypes = [torch.int32]
if values.dtype == torch.double:
dtypes.append(torch.float)
else:
dtypes.append(torch.double)
for dtype in dtypes:
printed.append("########## {} ##########".format(dtype))
x = sp_tensor.detach().to(dtype)
printed.append("# sparse tensor")
printed.append(str(x))
if x.dtype.is_floating_point:
printed.append("# after requires_grad_")
printed.append(str(x.requires_grad_()))
printed.append("# after addition")
printed.append(str(x + x))
printed.append("# _indices")
printed.append(str(x._indices()))
printed.append("# _values")
printed.append(str(x._values()))
printed.append('')
self.assertExpected('\n'.join(printed))
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_basic(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
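# API sketch (an illustration, not from the original file): the public
# .indices()/.values() accessors require a coalesced tensor, while the
# underscored _indices()/_values() work unconditionally but may expose
# duplicate entries.
#
#     s = torch.sparse_coo_tensor([[0, 0]], [1., 2.], (2,))
#     s._values()       # fine: tensor([1., 2.])
#     # s.values()      # raises RuntimeError until s = s.coalesce()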
@coalescedonoff
@dtypes(torch.double, torch.cdouble, torch.bfloat16)
@precisionOverride({torch.bfloat16: 1e-2})
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1991")
def test_coalesce(self, device, dtype, coalesced):
def _test_coalesce(t):
tc = t.coalesce()
self.assertEqual(tc.to_dense(), t.to_dense())
self.assertTrue(tc.is_coalesced())
# Our code below doesn't work when nnz is 0, because
# then it's a 0D tensor, not a 2D tensor.
if t._nnz() == 0:
self.assertEqual(t._indices(), tc._indices())
self.assertEqual(t._values(), tc._values())
return tc
value_map: Dict[Any, Any] = {}
for idx, val in zip(t._indices().t(), t._values()):
idx_tup = tuple(idx.tolist())
if idx_tup in value_map:
value_map[idx_tup] += val
else:
value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val
new_indices = sorted(value_map.keys())
_new_values = [value_map[idx] for idx in new_indices]
if t._values().ndimension() < 2:
new_values = t._values().new(_new_values)
else:
new_values = torch.stack(_new_values)
new_indices = t._indices().new(new_indices).t()
tg = t.new(new_indices, new_values, t.size())
self.assertEqual(tc._indices(), tg._indices())
self.assertEqual(tc._values(), tg._values())
if t.is_coalesced():
self.assertEqual(tc._indices(), t._indices())
self.assertEqual(tc._values(), t._values())
for empty_i, empty_v, empty_nnz in itertools.product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
t, _, _ = self._gen_sparse(len(sparse_size), nnz, sparse_size + dense_size, dtype, device, coalesced)
_test_coalesce(t) # this tests correctness
@dtypes(torch.double)
@skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/89395")
def test_coalesce_reference_cycle(self, device, dtype):
# Test coalesce doesn't create autograd graph cycles (gh-52253)
# Sanity check that the helper class works as expected
t = torch.rand(2)
t_ref = torch._C._WeakTensorRef(t)
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
def test_sparse_sum():
i = torch.tensor([[0], [4]], dtype=torch.long, device=device)
v = torch.tensor([[[-0.4567, -1.8797, 0.0380, 1.4316]]],
dtype=dtype, device=device)
S = torch.sparse_coo_tensor(i, v)
S = S.coalesce()
S.requires_grad_(True)
S2 = S.coalesce()
self.assertTrue(S2.is_coalesced())
return torch._C._WeakTensorRef(S2)
ref = test_sparse_sum()
self.assertTrue(ref.expired())
@dtypes(torch.double)
def test_ctor_large_sizes(self, device, dtype):
# Test that integer overflow is detected when computing numel
# of a sparse tensor with large dimensions (gh-57416). Notice
# that numel is computed internally when constructing a
# tensor, hence the overflow may appear during the tensor
# construction step.
N = 100000
indices = torch.tensor([[N, N - 1]] * 4, dtype=torch.int64, device=device)
values = torch.tensor([1, 2], dtype=dtype, device=device)
self.assertRaises(RuntimeError,
lambda: torch.sparse_coo_tensor(
indices, values, (N + 1,) * 4, device=device))
@dtypes(torch.double, torch.cdouble)
def test_ctor_size_checks(self, device, dtype):
indices = self.index_tensor([
[0, 0, 0],
[0, 3, 0],
[0, 0, 0],
[0, 0, 0],
], device=device)
values = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
# indices inconsistent with size
self.assertRaises(
RuntimeError,
lambda: self.sparse_tensor(indices, values, torch.Size([2, 1, 1])))
# values inconsistent with size
values = torch.tensor([
[2, 1, 2, 1],
[1, 0, 5, 2],
], dtype=dtype, device=device)
self.assertRaises(
RuntimeError,
lambda: self.sparse_tensor(indices, values, torch.Size([2, 4, 2, 1])))
@dtypes(*floating_and_complex_types_and(torch.float16, torch.bfloat16))
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupport triggers assertion error")
def test_to_dense(self, device, dtype):
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
@coalescedonoff
@dtypes(torch.float16, torch.bfloat16, torch.float64, torch.int, torch.cfloat, torch.cdouble)
def test_to_sparse(self, device, dtype, coalesced):
shape = [5, 2, 10, 4]
max_nnz = 1
for value_type in [torch.double, torch.cdouble]:
for dim, dim_sz in enumerate(shape, 1):
max_nnz *= dim_sz
rnnz = torch.randint(2, max_nnz, (1,)).item()
for nnz in [0, 1, rnnz]:
expected, _, _ = self._gen_sparse(dim, nnz, shape, dtype=value_type, device=device,
coalesced=coalesced)
expected = expected.to(dtype)
d = expected.to_dense()
result = d.to_sparse(dim)
self.assertEqual(d, result.to_dense())
self.assertEqual(expected.size(), result.size())
self.assertEqual(dim, result.sparse_dim())
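# Semantics sketch (an illustration, not from the original file):
# Tensor.to_sparse(n) keeps the first n dimensions sparse and folds the rest
# into dense values, so a 5x2x10x4 input with to_sparse(2) reports
# sparse_dim() == 2 and dense_dim() == 2.
#
#     d = torch.randn(5, 2, 10, 4)
#     s = d.to_sparse(2)
#     assert s.sparse_dim() == 2 and s.dense_dim() == 2
#     assert torch.equal(s.to_dense(), d)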
@dtypes(torch.double, torch.cdouble)
def test_sparse_bool(self, device, dtype):
a = torch.tensor([True, False], dtype=dtype, device=device).to(torch.bool)
b = a.to_sparse().to_dense()
self.assertEqual(a, b)
@dtypes(torch.double, torch.cdouble)
def test_scalar(self, device, dtype):
# tensor with value
a = self.sparse_tensor(self.index_tensor([], device=device).unsqueeze(1), 12.3, [], dtype=dtype, device=device)
self.assertEqual(1, a._values().numel())
self.assertEqual(a, a.clone())
a_coalesced = a.coalesce()
self.assertTrue(a_coalesced.is_coalesced())
self.assertEqual(torch.tensor(12.3, dtype=dtype, device=device), a.to_dense())
self.assertEqual(a, a.to_dense().to_sparse())
# tensor with multiple values
a = self.sparse_tensor(self.index_tensor([], device=device).unsqueeze(1).expand(0, 2),
[12.3, 12.3], [], dtype=dtype, device=device)
self.assertEqual(2, a._values().numel())
self.assertEqual(a, a.clone())
a_coalesced = a.coalesce()
self.assertTrue(a_coalesced.is_coalesced())
self.assertEqual(torch.tensor(12.3 * 2, dtype=dtype, device=device), a.to_dense())
self.assertEqual(a.coalesce(), a.coalesce().to_dense().to_sparse())
# tensor without value
a = self.sparse_empty((), dtype=dtype, device=device)
self.assertEqual(0, a._values().numel())
self.assertEqual(a, a.clone())
a_coalesced = a.coalesce()
self.assertTrue(a_coalesced.is_coalesced())
self.assertEqual(torch.tensor(0, dtype=dtype, device=device), a.to_dense())
self.assertEqual(a, a.to_dense().to_sparse())
@dtypes(torch.double, torch.cdouble)
def test_shared(self, device, dtype):
i = self.index_tensor([[2]], device=device)
v = torch.tensor([5], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3]))
v[0] = 6
self.assertEqual(torch.tensor([0, 0, 6], dtype=dtype, device=device), self.safeToDense(x))
i[0][0] = 0
self.assertEqual(torch.tensor([6, 0, 0], dtype=dtype, device=device), self.safeToDense(x))
i = self.index_tensor([[2]], device=device)
v = torch.empty((1, 0), dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 0]))
i[0][0] = 0
self.assertEqual(torch.empty((3, 0), dtype=dtype, device=device), self.safeToDense(x))
@dtypes(torch.double, torch.cdouble)
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupport triggers assertion error")
def test_to_dense_hybrid(self, device, dtype):
def test_tensor(x, res):
x.to_dense() # Tests double to_dense for memory corruption
x.to_dense()
x.to_dense()
self.assertEqual(res, x.to_dense())
self.assertEqual(res, self.safeToDense(x))
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
], device=device)
v = torch.tensor([[2, 3], [1, 2], [3, 4], [4, 5]], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 2]))
res = torch.tensor([
[[2, 3],
[0, 0],
[0, 0],
[0, 0]],
[[1, 2],
[0, 0],
[0, 0],
[0, 0]],
[[3, 4],
[0, 0],
[0, 0],
[4, 5]],
], dtype=dtype, device=device)
test_tensor(x, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
], device=device)
v = torch.empty((4, 2, 0), dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 2, 0]))
res = torch.empty((3, 4, 2, 0), dtype=dtype, device=device)
test_tensor(x, res)
@dtypes(torch.double, torch.cdouble)
def test_contig(self, device, dtype):
def test_tensor(x, exp_i, exp_v):
x = x.coalesce()
self.assertEqual(exp_i, x._indices())
self.assertEqual(exp_v, x._values())
i = self.index_tensor([
[1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
[92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
], device=device)
v = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([100, 100]))
exp_i = self.index_tensor([
[0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
[31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
], device=device)
exp_v = torch.tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
], device=device)
v = torch.tensor([3, 2, 4, 1], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]))
exp_i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
exp_v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
], device=device)
v = torch.empty([4, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]))
exp_i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
exp_v = torch.empty([4, 0], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
# Duplicate indices
i = self.index_tensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
], device=device)
v = torch.tensor([3, 2, 4, 1], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]))
exp_i = self.index_tensor([
[0, 2],
[0, 3],
[0, 4],
], device=device)
exp_v = torch.tensor([6, 4], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
], device=device)
v = torch.empty([4, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]))
exp_i = self.index_tensor([
[0, 2],
[0, 3],
[0, 4],
], device=device)
exp_v = torch.empty([2, 0], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
@dtypes(torch.double, torch.cdouble)
def test_contig_hybrid(self, device, dtype):
def test_tensor(x, exp_i, exp_v):
x = x.coalesce()
self.assertEqual(exp_i, x._indices())
self.assertEqual(exp_v, x._values())
i = self.index_tensor([
[1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
[92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
], device=device)
v = torch.tensor([
[1, 2], [2, 3], [3, 4], [4, 5], [5, 6],
[6, 7], [7, 8], [8, 9], [9, 10], [10, 11],
], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([100, 100, 2]))
exp_i = self.index_tensor([
[0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
[31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
], device=device)
exp_v = torch.tensor([
[2, 3], [1, 2], [6, 7], [4, 5], [10, 11],
[3, 4], [5, 6], [9, 10], [8, 9], [7, 8],
], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
], device=device)
v = torch.tensor([[3, 3, 3], [2, 2, 2], [4, 4, 4], [1, 1, 1]], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3]))
exp_i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
exp_v = torch.tensor([[2, 2, 2], [1, 1, 1], [3, 3, 3], [4, 4, 4]], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
], device=device)
v = torch.empty([4, 3, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3, 0]))
exp_i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
exp_v = torch.empty([4, 3, 0], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
# Duplicate indices
i = self.index_tensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
], device=device)
v = torch.tensor([[3, 2, 3], [2, 1, 1], [4, 3, 4], [1, 1, 1]], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3]))
exp_i = self.index_tensor([
[0, 2],
[0, 3],
[0, 4],
], device=device)
exp_v = torch.tensor([[6, 4, 5], [4, 3, 4]], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
], device=device)
v = torch.empty([4, 3, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3, 0]))
exp_i = self.index_tensor([
[0, 2],
[0, 3],
[0, 4],
], device=device)
exp_v = torch.empty([2, 3, 0], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_clone(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
x = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
if not coalesced:
self.assertFalse(x.is_coalesced())
y = x.clone()
self.assertFalse(y.is_coalesced())
x = x.coalesce()
self.assertTrue(x.is_coalesced())
y = x.clone()
self.assertTrue(y.is_coalesced())
test_shape(4, 20, 5)
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
@coalescedonoff
@dtypes(torch.double, torch.cdouble, torch.bfloat16)
@precisionOverride({torch.bfloat16: 2e-2})
def test_Sparse_to_Sparse_copy_(self, device, dtype, coalesced):
# This is for testing torch.copy_(SparseTensor, SparseTensor)
sparse_dims = 3
nnz = 10
sizes = [2, 3, 4, 5] # hybrid sparse
x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
# test copy
x2_dense = x2.to_dense()
x1.copy_(x2)
self.assertEqual(x2_dense, x1.to_dense())
# test type conversion (when x1.copy_(x2), x1.dtype should stay the same)
x1 = x1.to(torch.float32)
x2 = x2.to(torch.float16)
x1_dtype = x1.dtype
x1.copy_(x2)
self.assertEqual(x1_dtype, x1.dtype)
x2 = x2.to(torch.float64)
x1_dtype = x1.dtype
x1.copy_(x2)
self.assertEqual(x1_dtype, x1.dtype)
# test no broadcast
self.assertRaises(RuntimeError, lambda: x1.copy_(x2.narrow_copy(0, 0, 1)))
# test raise error on copy_() between dense and sparse Tensors
self.assertRaises(RuntimeError, lambda: x1.copy_(torch.randn(5, 5)))
# test autograd
x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
x2.requires_grad_(True)
x1.copy_(x2)
y = x1 * 2
x2_clone = x2.clone()
y.backward(x2_clone)
expected_grad = x2_clone * 2
self.assertEqual(expected_grad.to_dense(), x2.grad.to_dense())
self.assertEqual(None, x1.grad)
@coalescedonoff
@unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
@dtypes(torch.double, torch.cdouble)
def test_Sparse_to_Sparse_copy_multi_gpu(self, device, dtype, coalesced):
# This is for testing torch.copy_(SparseTensor, SparseTensor) across GPU devices
sparse_dims = 3
nnz = 10
sizes = [2, 3, 4, 5] # hybrid sparse
x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
x1 = x1.to('cuda:0')
def test_cross_device(x1, x2):
x1_device = x1.device
x1.copy_(x2)
self.assertEqual(x2.to('cuda:0').to_dense(), x1.to_dense())
self.assertEqual(x1_device, x1.device)
test_cross_device(x1, x2.to('cuda:1')) # test across gpu devices
test_cross_device(x1, x2.to('cpu')) # test between cpu and gpu
# test autograd
x2 = x2.to('cuda:1')
x2.requires_grad_(True)
x1.copy_(x2)
y = x1 * 2
x2_clone = x2.clone().to('cuda:0')
y.backward(x2_clone)
expected_grad = x2_clone * 2
self.assertEqual(expected_grad.to_dense(), x2.grad.to('cuda:0').to_dense())
self.assertEqual(None, x1.grad)
@onlyCUDA
def test_cuda_empty(self, device):
def test_tensor(x):
y = x.to(device)
self.assertEqual(x.sparse_dim(), y.sparse_dim())
self.assertEqual(x.dense_dim(), y.dense_dim())
x = y.cpu()
self.assertEqual(y.sparse_dim(), x.sparse_dim())
self.assertEqual(y.dense_dim(), x.dense_dim())
x = torch.sparse.FloatTensor(2, 3, 4)
test_tensor(x)
x = torch.sparse.HalfTensor(2, 3, 4)
test_tensor(x)
x = torch.cuda.sparse.HalfTensor(2, 3, 4)
test_tensor(x)
x = torch.sparse.FloatTensor(2, 3, 4, 0)
test_tensor(x)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_transpose(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
x = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
y = self.safeToDense(x)
for i, j in itertools.combinations(range(4), 2):
x = x.transpose_(i, j)
y = y.transpose(i, j)
self.assertEqual(self.safeToDense(x), y)
x = x.transpose(i, j)
y = y.transpose(i, j)
self.assertEqual(self.safeToDense(x), y)
test_shape(4, 6, 3)
test_shape(4, 3, [7, 7, 7, 3, 3, 3, 0])
test_shape(4, 0, [0, 0, 7, 3, 3, 3, 0])
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupport triggers assertion error")
def test_permute(self, device, dtype, coalesced):
# trivial checks
s = torch.rand(3, 3, 3, device=device, dtype=dtype).to_sparse()
with self.assertRaisesRegex(RuntimeError, "does not match the length"):
s.permute(dims=(1, 0))
with self.assertRaisesRegex(RuntimeError, "duplicate dims"):
s.permute(dims=(1, 1, 1))
def test_shape(sparse_dims, nnz, with_size):
ndim = len(with_size)
valid_sparse_dims = torch.arange(-ndim, -ndim + sparse_dims)
valid_dense_dims = torch.arange(-ndim + sparse_dims, 0)
for dims in itertools.permutations(range(-ndim, 0)):
s = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
d = self.safeToDense(s)
dims_sparse, _ = torch.tensor(dims[:sparse_dims]).sort()
dims_dense, _ = torch.tensor(dims[sparse_dims:]).sort()
if (valid_sparse_dims == dims_sparse).all() and (valid_dense_dims == dims_dense).all():
# if valid permutation, test for correctness
s_permuted = s.permute(dims)
self.assertEqual(s_permuted, d.permute(dims))
# if s is coalesced, and perm does not touch 0-dim,
# the result has to be coalesced as well
if dims[0] == 0:
self.assertEqual(s_permuted.is_coalesced(), s.is_coalesced())
else:
self.assertFalse(s_permuted.is_coalesced())
gradcheck(lambda t: t.permute(dims).to_dense(), s.requires_grad_(True), check_sparse_nnz=True)
else:
# otherwise check if exception is thrown
fail_message = "transpositions between sparse and dense dimensions are not allowed"
with self.assertRaisesRegex(RuntimeError, fail_message):
s.permute(dims)
test_shape(2, 3, [2, 3, 4, 5])
test_shape(2, 3, [2, 2, 0])
# if nnz=0, it is not true that t == t.to_dense().to_sparse()
# unless t.sparse_dim == t.dim (i.e. t is not hybrid)
test_shape(3, 0, [0, 0, 2])
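# Rule sketch (an illustration, not from the original file): COO permute only
# accepts permutations that keep sparse dims among sparse positions and dense
# dims among dense positions, since mixing the two groups would require moving
# data between the indices and values tensors.
#
#     s = torch.randn(2, 3, 4).to_sparse(2)  # 2 sparse dims, 1 dense dim
#     s.permute(1, 0, 2)                     # ok: sparse dims swap in place
#     # s.permute(2, 1, 0)                   # raises RuntimeError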
@coalescedonoff
@onlyCPU
@dtypes(torch.double)
def test_coalesce_transpose_mm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x, _, _ = self._gen_sparse(2, nnz, [dj, di], dtype, device, coalesced)
y = torch.randn(dj, dk, dtype=dtype, device=device)
x_coalesced = x.coalesce()
self.assertTrue(x_coalesced.is_coalesced())
x_coalesced_t = x_coalesced.t()
# Transpose preserves the coalesced flag only if the indices tensor is empty.
self.assertEqual(x_coalesced_t.is_coalesced(), di * nnz == 0)
res = torch.mm(x_coalesced_t, y)
expected = torch.mm(self.safeToDense(x_coalesced_t), y)
self.assertEqual(res, expected)
test_shape(10, 20, 30, 20)
test_shape(0, 20, 30, 0)
test_shape(10, 0, 30, 0)
test_shape(10, 20, 0, 0)
test_shape(10, 20, 0, 20)
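# Clarifying sketch (an illustration, not from the original file): transposing
# a coalesced COO tensor swaps rows of the indices tensor, which generally
# breaks the sorted index order, so the result reports is_coalesced() == False
# whenever it has any indices.
#
#     s = torch.tensor([[0., 1.], [2., 0.]]).to_sparse().coalesce()
#     assert s.is_coalesced()
#     assert not s.t().is_coalesced()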
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1166")
@dtypes(torch.double, torch.cdouble)
def test_t_empty(self, device, dtype):
def test_in_place(x):
shape_original = x.shape
x.t_()
self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), x.size())
self.assertEqual(0, x._indices().numel())
self.assertEqual(0, x._values().numel())
self.assertEqual(x.sparse_dim(), 2)
self.assertEqual(x.dense_dim(), 0)
def test_not_in_place(x):
shape_original = x.shape
y = x.t()
self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), y.size())
self.assertEqual(0, y._indices().numel())
self.assertEqual(0, y._values().numel())
self.assertEqual(x.sparse_dim(), 2)
self.assertEqual(x.dense_dim(), 0)
x = self.sparse_empty(2, 3, dtype=dtype, device=device)
test_in_place(x)
test_not_in_place(x)
x = self.sparse_empty(2, 0, dtype=dtype, device=device)
test_in_place(x)
test_not_in_place(x)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_add_zeros(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, sizes):
x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
zeros = torch.zeros(sizes, layout=torch.sparse_coo).to(x.device)
r1 = zeros + x
r2 = x + zeros
self.assertEqual(r1, x)
self.assertEqual(r2, x)
test_shape(1, 20, [1])
test_shape(4, 20, [3, 17, 19, 5])
test_shape(2, 20, [3, 17, 19, 5])
test_shape(2, 20, [3, 17, 19, 0])
@dtypes(torch.double, torch.cdouble)
def test_add_sub_nnz(self, device, dtype):
# nnz should not grow unbounded (gh-34964)
x = torch.randn(10, dtype=dtype, device=device).to_sparse()
x.add_(x)
x.add_(x)
self.assertLessEqual(x._nnz(), 10)
x.sub_(2 * x)
x.sub_(2 * x)
self.assertLessEqual(x._nnz(), 10)
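# Regression sketch (an illustration of gh-34964, not from the original file):
# in-place add/sub must merge duplicate indices rather than concatenate them,
# so nnz stays bounded by the dense element count.
#
#     x = torch.ones(4).to_sparse()
#     x.add_(x)
#     assert x._nnz() <= 4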
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_cat(self, device, dtype, coalesced):
# shapes: list of tuples (sparse_dims, nnz, sizes)
def test_shapes(shapes, dim, fail_message=None):
inputs = [self._gen_sparse(shape[0], shape[1], shape[2], dtype, device, coalesced)[0]
for shape in shapes]
if fail_message:
with self.assertRaisesRegex(RuntimeError, fail_message):
torch.cat(inputs, dim)
else:
result = torch.cat(inputs, dim)
dense_result = torch.cat([t.to_dense() for t in inputs], dim)
self.assertEqual(dense_result, result.to_dense())
test_shapes(
[(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], 1)
# mismatched sizes
test_shapes([(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4])], 0,
"All tensors must have the same shape: \\[2, 3, 4].*\\[2, 1, 4]")
# hybrid sparse/dense
test_shapes(
[(2, 10, [2, 3, 4]), (2, 10, [2, 1, 4]), (2, 10, [2, 4, 4])], 1)
# cat along dense dim
test_shapes([(2, 10, [2, 3, 4]), (2, 10, [2, 3, 7])], 2)
test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 1)
test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 2)
# mismatched dimensions
test_shapes([(2, 10, [2, 3, 4]), (3, 10, [2, 3, 4])], 0,
"All tensors must have the same.*2, 1, but tensor at position 1 has 3, 0.")
# wrapped dimension
test_shapes(
[(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], -2)
# sparse with dense
sp = self._gen_sparse(3, 10, [2, 3, 4], dtype, device, coalesced)[0]
dn = sp.to_dense()
with self.assertRaisesRegex(RuntimeError,
"Concatenating sparse tensors, but a dense tensor was found at position 1."):
torch.cat((sp, dn))
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_unsqueeze(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, sizes, unsqueeze_dim, fail_message=None):
x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
if fail_message:
with self.assertRaisesRegex(IndexError, fail_message):
torch.unsqueeze(x, unsqueeze_dim)
else:
result = torch.unsqueeze(x, unsqueeze_dim)
dense_result = torch.unsqueeze(x.to_dense(), unsqueeze_dim)
self.assertEqual(dense_result, result.to_dense())
# basic case
test_shape(3, 10, [5, 7, 11], 0)
# hybrid sparse/dense, unsqueeze along sparse dim
test_shape(3, 10, [5, 7, 11, 13, 17], 0)
test_shape(3, 10, [5, 7, 11, 13, 17], 3)
# unsqueeze along dense dimensions
test_shape(3, 10, [5, 7, 11, 13, 17], 4)
test_shape(3, 10, [5, 7, 11, 13, 17], 5)
# wrapped dimensions
test_shape(3, 10, [5, 7, 11, 13, 17], -1)
test_shape(3, 10, [5, 7, 11, 13, 17], -6)
# bounds
test_shape(3, 10, [5, 7, 11, 13, 17], -7, "Dimension out of range")
test_shape(3, 10, [5, 7, 11, 13, 17], 6, "Dimension out of range")
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_select(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, sizes, select_dim, select_index, fail_message=None):
x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
if fail_message:
with self.assertRaisesRegex(IndexError, fail_message):
torch.select(x, select_dim, select_index)
else:
result = torch.select(x, select_dim, select_index)
if result.is_sparse:
result = result.to_dense()
dense_result = torch.select(x.to_dense(), select_dim, select_index)
self.assertEqual(dense_result, result)
sizes = [5, 7, 11, 13, 17]
# hybrid sparse/dense, select sparse dim, result is dense
for i in range(sizes[0]):
test_shape(1, 10, sizes, 0, i)
test_shape(1, 10, sizes, 0, sizes[0] + 1, r'select[(][)][:] index \d out of range.*')
# hybrid sparse/dense, select sparse dim, result is sparse
for d in range(3):
for i in range(sizes[d]):
test_shape(3, 10, sizes, d, i)
# hybrid sparse/dense, select dense dim, result is sparse
for d in range(1, 3):
for i in range(sizes[d]):
test_shape(1, 10, sizes, d, i)
@dtypes(*integral_types())
def test_select_no_type_promotion(self, device, dtype):
# see https://github.com/pytorch/pytorch/issues/82150
idx = torch.tensor([[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]])
val = torch.ones(6, dtype=dtype)
s = torch.sparse_coo_tensor(idx, val, size=(3, 3))
for t in (s, s * torch.tensor(0, dtype=dtype)):
# empty checks
self.assertEqual(t.dtype, t[2].dtype)
self.assertEqual(t.dtype, t[0, 1].dtype)
# sum should not promote
self.assertEqual(t.dtype, t[0, 0].dtype)
self.assertEqual(t.dtype, t[1, 1].dtype)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_index_select(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, sizes, select_dim, select_index, fail_message=None):
if isinstance(select_index, int):
select_index = [select_index]
if isinstance(select_index, list):
select_index = torch.tensor(select_index, device=device, dtype=torch.long)
x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
if fail_message:
with self.assertRaisesRegex(IndexError, fail_message):
torch.index_select(x, select_dim, select_index)
else:
result = torch.index_select(x, select_dim, select_index)
if result.is_sparse:
result = result.to_dense()
dense_result = torch.index_select(x.to_dense(), select_dim, select_index)
self.assertEqual(dense_result, result)
sizes = [5, 7, 11, 13, 17]
for d in range(len(sizes)):
for index in [0, sizes[d] - 1, [0, sizes[d] // 2, sizes[d] - 1]]:
test_shape(1, 10, sizes, d, index)
test_shape(len(sizes) // 2, 10, sizes, d, index)
test_shape(len(sizes), 10, sizes, d, index)
def _test_index_select_exhaustive_index(self, sizes, dims, device, dtype, coalesced):
t = make_tensor(sizes, dtype=dtype, device=device)
t_sparse = t.to_sparse().coalesce() if coalesced else t.to_sparse()
t_small_sparse, _, _ = self._gen_sparse(len(sizes), 2, sizes, dtype, device, coalesced)
t_small = t_small_sparse.to_dense()
for d in dims:
# NOTE: indices are negative
idx_dim_d_range = list(range(-sizes[d], 0))
for idx_len in range(sizes[d], sizes[d] + 1):
# creates all possible valid indices into dim d of length idx_len
for idx in itertools.product(*itertools.repeat(idx_dim_d_range, idx_len)):
t_idx = torch.tensor(idx, dtype=torch.long, device=device)
# NOTE: index_select for dense does not support negative indices,
# hence + sizes[d]. See https://github.com/pytorch/pytorch/issues/76347
# tests the nnz > sizes[d] branch
dense_result = t.index_select(d, t_idx + sizes[d])
sparse_result = t_sparse.index_select(d, t_idx)
self.assertEqual(dense_result, sparse_result)
# tests the nnz <= sizes[d] branch
small_dense_result = t_small.index_select(d, t_idx + sizes[d])
small_sparse_result = t_small_sparse.index_select(d, t_idx)
self.assertEqual(small_dense_result, small_sparse_result)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_index_select_exhaustive_index_small(self, device, dtype, coalesced):
# will trigger brute-force algo
self._test_index_select_exhaustive_index((3, 3, 4), range(3), device, dtype, coalesced)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_index_select_exhaustive_index_large(self, device, dtype, coalesced):
# will trigger more sophisticated algos
self._test_index_select_exhaustive_index((100, 50, 3, 3), (2, 3), device, dtype, coalesced)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_index_select_empty_and_non_contiguous_index(self, device, dtype, coalesced):
# empty index
idx_empty = torch.tensor([], dtype=torch.long, device=device)
t = make_tensor((5, 5), dtype=dtype, device=device)
res_dense = t.index_select(0, idx_empty)
res_sparse = t.to_sparse().index_select(0, idx_empty)
self.assertEqual(res_dense, res_sparse)
# non-contiguous index
idx = torch.randint(low=0, high=5, size=(10, 2), device=device)[:, 0]
def run_test(sizes):
# case nnz > size[d]
t = make_tensor(sizes, dtype=dtype, device=device)
res_dense = t.index_select(0, idx)
res_sparse = t.to_sparse().index_select(0, idx)
self.assertEqual(res_dense, res_sparse)
# case nnz <= size[d]
t_small_sparse, _, _ = self._gen_sparse(len(sizes), 2, sizes, dtype, device, coalesced)
res_sparse = t_small_sparse.index_select(0, idx)
res_dense = t_small_sparse.to_dense().index_select(0, idx)
self.assertEqual(res_dense, res_sparse)
# brute-force
run_test((10, 10))
# more sophisticated algos
run_test((10, 100, 100))
@onlyCPU
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_index_select_parallelization(self, device, dtype, coalesced):
"""
Test with sizes that will trigger parallelization (i.e. with sizes
that are >= at::internal::GRAIN_SIZE)
"""
def run_test(nnz, size):
t_sparse, _, _ = self._gen_sparse(1, nnz, (size,), dtype, device, coalesced)
t_dense = t_sparse.to_dense()
# idx_small to (sort) and (binary) search into t_sparse
idx_small = torch.randint(size, (nnz // 2,), device=device)
# idx_large to (sort) and (binary) search into idx_large
# NOTE: when coalesced=True, the (binary) search will be
# done over t_sparse anyway, as it is already sorted.
idx_large = torch.randint(size, (nnz * 2,), device=device)
for idx in (idx_small, idx_large):
res_dense = t_dense.index_select(0, idx)
res_sparse = t_sparse.index_select(0, idx)
self.assertEqual(res_dense, res_sparse)
# NOTE: GRAIN_SIZE = 32768
# case nnz <= size[d]
tlen = 70000 # > 2 * GRAIN_SIZE
run_test(tlen, tlen)
# case nnz > size[d]
run_test(tlen, tlen // 2)
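# Context sketch (an illustration, not from the original file): ATen switches
# index_select to the multi-threaded path once the work exceeds
# at::internal::GRAIN_SIZE (32768), and the sparse result must match the dense
# reference on either path.
#
#     t = torch.randn(70000).to_sparse()     # > 2 * GRAIN_SIZE elements
#     idx = torch.randint(70000, (70000,))
#     assert torch.equal(t.index_select(0, idx).to_dense(),
#                        t.to_dense().index_select(0, idx))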
@onlyCPU
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_mm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x, _, _ = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)
t = torch.randn(di, dk, dtype=dtype, device=device)
y = torch.randn(dj, dk, dtype=dtype, device=device)
alpha = random.random()
beta = random.random()
res = torch.addmm(t, x, y, beta=beta, alpha=alpha)
expected = torch.addmm(t, self.safeToDense(x), y, beta=beta, alpha=alpha)
self.assertEqual(res, expected)
res = torch.addmm(t, x, y)
expected = torch.addmm(t, self.safeToDense(x), y)
self.assertEqual(res, expected)
res = torch.mm(x, y)
expected = torch.mm(self.safeToDense(x), y)
self.assertEqual(res, expected)
test_shape(10, 100, 100, 20)
test_shape(100, 1000, 200, 20)
test_shape(64, 10000, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(10, 0, 100, 0)
test_shape(10, 100, 0, 0)
test_shape(10, 100, 0, 20)
@unittest.skipIf(
IS_WINDOWS and TEST_CUDA,
"bmm sparse-dense CUDA is not yet supported in Windows, at least up to CUDA 10.1"
)
@unittest.skipIf(
TEST_CUDA and _get_torch_cuda_version() < (10, 1) and not TEST_WITH_ROCM,
"bmm sparse-dense requires CUDA 10.1 or greater"
)
@coalescedonoff
@dtypes(torch.double)
def test_bmm(self, device, dtype, coalesced):
def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
a_list = []
b_list = []
for mat_idx in range(num_mats):
a_mat = self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0]
b_mat = torch.randn([dim_j, dim_k], dtype=dtype, device=device)
a_list.append(a_mat)
b_list.append(b_mat)
a = torch.stack(a_list)
b = torch.stack(b_list)
ab = a.bmm(b)
# Compare each matrix against result from mm()
for mat_idx in range(num_mats):
a_mat = a_list[mat_idx]
b_mat = b_list[mat_idx]
ab_mat_bmm = ab[mat_idx]
ab_mat_mm = a_mat.mm(b_mat)
self.assertEqual(ab_mat_bmm, ab_mat_mm)
test_shape(10, 10, 100, 99, 20)
test_shape(10, 100, 1000, 200, 20)
test_shape(10, 64, 10000, 300, 20)
test_shape(10, 0, 100, 99, 0)
test_shape(10, 10, 0, 100, 0)
test_shape(10, 10, 100, 0, 0)
test_shape(10, 10, 100, 0, 20)
test_shape(10, 10, 100, 0, 20)
a = torch.rand([10, 23, 32], dtype=dtype, device=device)
a[3] = torch.zeros(23, 32, dtype=dtype, device=device)
a[6] = torch.zeros(23, 32, dtype=dtype, device=device)
a = a.to_sparse()
b = torch.rand([10, 32, 10], dtype=dtype, device=device)
b[4] = torch.zeros(32, 10, dtype=dtype, device=device)
b[6] = torch.zeros(32, 10, dtype=dtype, device=device)
ab = a.bmm(b)
for mat_idx in range(ab.size(0)):
ab_mat = ab[mat_idx]
ab_mat_check = a[mat_idx].mm(b[mat_idx])
self.assertEqual(ab_mat, ab_mat_check)
ab_transpose_check = b.transpose(1, 2).to_sparse().bmm(
a.transpose(1, 2).to_dense()
).transpose(1, 2)
self.assertEqual(ab, ab_transpose_check)
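# Identity sketch (an illustration, not from the original file): the transpose
# check above relies on (A @ B)^T == B^T @ A^T applied batch-wise, which keeps
# the sparse operand on the left as bmm(sparse, dense) requires.
#
#     A = torch.randn(2, 3, 4)
#     B = torch.randn(2, 4, 5)
#     lhs = torch.bmm(A, B).transpose(1, 2)
#     rhs = torch.bmm(B.transpose(1, 2), A.transpose(1, 2))
#     assert torch.allclose(lhs, rhs)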
@onlyCUDA
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(
IS_WINDOWS,
"bmm sparse-dense CUDA is not yet supported in Windows, at least up to CUDA 10.1"
)
@unittest.skipIf(
_get_torch_cuda_version() < (10, 1) and not TEST_WITH_ROCM,
"bmm sparse-dense requires CUDA 10.1 or greater"
)
def test_bmm_deterministic(self, device, dtype, coalesced):
def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
a_list = []
b_list = []
for mat_idx in range(num_mats):
a_list.append(self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0])
b_list.append(torch.randn([dim_j, dim_k], dtype=dtype, device=device))
a = torch.stack(a_list).cuda()
b = torch.stack(b_list).cuda()
with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
torch.use_deterministic_algorithms(False)
ab_nondeterministic = torch.bmm(a, b)
torch.use_deterministic_algorithms(True)
ab_deterministic = torch.bmm(a, b)
diff_abs = (ab_deterministic - ab_nondeterministic).abs()
diff_rel = diff_abs / ab_deterministic.abs()
diff_rel[torch.isnan(diff_rel)] = 0
# deterministic and non-deterministic results should either be
# equal or within a small relative difference
equal_abs_or_rel = diff_abs.eq(0).logical_or(diff_rel.lt(0.001))
self.assertTrue(equal_abs_or_rel.all())
test_shape(10, 10, 100, 99, 20)
test_shape(10, 100, 1000, 200, 20)
test_shape(10, 64, 10000, 300, 20)
test_shape(10, 0, 100, 99, 0)
test_shape(10, 10, 0, 100, 0)
test_shape(10, 10, 100, 0, 0)
test_shape(10, 10, 100, 0, 20)
test_shape(10, 10, 100, 0, 20)
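# Pattern sketch (an illustration, not from the original file): outside the
# test-only DeterministicGuard helper, the same save/restore discipline looks
# like:
#
#     prev = torch.are_deterministic_algorithms_enabled()
#     torch.use_deterministic_algorithms(True)
#     try:
#         ...  # run the op that should take the deterministic path
#     finally:
#         torch.use_deterministic_algorithms(prev)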
@onlyCUDA
@unittest.skipIf(
not IS_WINDOWS or _get_torch_cuda_version() >= (11, 0),
"this test ensures bmm sparse-dense CUDA gives an error when run on Windows with CUDA < 11.0"
)
@dtypes(torch.double)
def test_bmm_windows_error(self, device, dtype):
a = torch.rand(2, 2, 2, dtype=dtype).to_sparse().cuda()
b = torch.rand(2, 2, 2, dtype=dtype).cuda()
with self.assertRaisesRegex(
RuntimeError,
"bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0"):
ab = a.bmm(b)
@onlyCUDA
@skipIfRocm
@unittest.skipIf(
_get_torch_cuda_version() >= (10, 1),
"this test ensures bmm gives error if CUDA version is less than 10.1"
)
@dtypes(torch.double)
def test_bmm_cuda_version_error(self, device, dtype):
a = torch.rand(2, 2, 2, dtype=dtype).to_sparse().cuda()
b = torch.rand(2, 2, 2, dtype=dtype).cuda()
with self.assertRaisesRegex(
RuntimeError,
"bmm sparse-dense requires CUDA 10.1 or greater"):
ab = a.bmm(b)
@onlyCPU
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_saddmm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
t = self._gen_sparse(2, nnz, [di, dk], dtype, device, coalesced)[0]
y = torch.randn(dj, dk, dtype=dtype, device=device)
alpha = random.random()
beta = random.random()
res = torch.saddmm(t, x, y, beta=beta, alpha=alpha)
expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y, beta=beta, alpha=alpha)
self.assertEqual(self.safeToDense(res), expected)
res = torch.saddmm(t, x, y)
expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
self.assertEqual(self.safeToDense(res), expected)
res = torch.smm(x, y)
expected = torch.mm(self.safeToDense(x), y)
self.assertEqual(self.safeToDense(res), expected)
test_shape(7, 5, 3, 20)
test_shape(1000, 100, 100, 20)
test_shape(3000, 64, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(1000, 0, 100, 0)
test_shape(1000, 100, 0, 0)
@onlyCPU
@coalescedonoff
# Adding a graph break before self.assertFalse(weight._indices().is_contiguous())
# makes the test pass, which points to an existing sparse-related bug.
@skipIfTorchDynamo("skip")
@dtypes(torch.double, torch.cdouble)
def test_sspaddmm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
t = self._gen_sparse(2, nnz, [di, dk], dtype, device, coalesced)[0]
y = torch.randn(dj, dk, dtype=dtype, device=device)
alpha = random.random()
beta = random.random()
res = t.sspaddmm(x, y, beta=beta, alpha=alpha)
expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y, beta=beta, alpha=alpha)
self.assertEqual(self.safeToDense(res), expected)
res = t.sspaddmm(x, y)
expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
self.assertEqual(self.safeToDense(res), expected)
test_shape(7, 5, 3, 20)
test_shape(1000, 100, 100, 20)
test_shape(3000, 64, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(1000, 0, 100, 0)
test_shape(1000, 100, 0, 0)
# Test code from issue https://github.com/pytorch/pytorch/issues/45113
batch_size, input_size, hidden_size = 5, 3, 7
# Create coalesced sparse tensor with non-contiguous indices
weight = torch.randn(hidden_size, input_size, dtype=dtype, device=device).to_sparse()
self.assertTrue(weight.is_coalesced())
non_contig_indices = weight.indices().mT.contiguous().mT
weight = torch.sparse_coo_tensor(
indices=non_contig_indices, values=weight.values(), size=weight.shape)
weight._coalesced_(True)
self.assertFalse(weight._indices().is_contiguous())
# Create un/coalesced sparse tensor
bias = torch.randn((hidden_size, 1), dtype=dtype, device=device).to_sparse()
bias = torch.cat([bias] * batch_size, dim=1)
if coalesced:
bias = bias.coalesce()
x = torch.randn(input_size, batch_size, dtype=dtype, device=device)
res = bias.sspaddmm(weight, x)
true_result = (bias.to_dense() + torch.matmul(weight.to_dense(), x)).to_sparse()
self.assertEqual(self.safeToDense(res), self.safeToDense(true_result))
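# Semantics sketch (an illustration, not from the original file):
# t.sspaddmm(x, y, beta=b, alpha=a) computes b * t + a * (x @ y) with sparse
# t and x and dense y, returning a sparse result; densifying both sides gives
# the addmm reference used above.
#
#     t = torch.eye(3).to_sparse()
#     x = torch.eye(3).to_sparse()
#     y = torch.ones(3, 3)
#     out = t.sspaddmm(x, y)  # sparse result
#     assert torch.allclose(out.to_dense(),
#                           torch.addmm(t.to_dense(), x.to_dense(), y))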
@coalescedonoff
@unittest.skip("See https://github.com/pytorch/pytorch/issues/73145")
@dtypes(torch.double, torch.cdouble, torch.bfloat16)
def test_sparse_addmm(self, device, dtype, coalesced):
def test_shape(m, n, p, nnz, broadcast, alpha_beta=None):
if alpha_beta is None:
alpha = random.random()
beta = random.random()
else:
alpha, beta = alpha_beta
if broadcast:
D1 = make_tensor((), dtype=dtype, device=device, requires_grad=True)
else:
D1 = make_tensor([n, p], dtype=dtype, device=device, requires_grad=True)
D2 = make_tensor([m, p], dtype=dtype, device=device, requires_grad=True)
S = self._gen_sparse(2, nnz, [n, m], dtype, device, coalesced)[0]
S_dense = S.to_dense().requires_grad_(True)
S.requires_grad_(True)
Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
self.assertEqual(Y, Y_dense)
def fn(S, D1, D2, beta=beta, alpha=alpha):
return torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
gradcheck(fn, (S, D1, D2), check_sparse_nnz=True)
test_shape(7, 8, 9, 20, False, None)
test_shape(7, 8, 9, 20, True, None)
test_shape(7, 8, 9, 20, False, (1, 0))
test_shape(7, 8, 9, 20, True, (1, 0))
test_shape(7, 8, 9, 20, False, (1, 1))
test_shape(7, 8, 9, 20, True, (1, 1))
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupport triggers assertion error")
def test_sparse_mm(self, device, dtype, coalesced):
def test_shape(d1, d2, d3, nnz, transposed):
if transposed:
D = torch.randn(d3, d2, dtype=dtype,
device=device).t_().requires_grad_(True)
else:
D = torch.randn(d2, d3, dtype=dtype, device=device).requires_grad_(True)
S = self._gen_sparse(2, nnz, [d1, d2], dtype, device, coalesced)[0]
S_dense = S.to_dense().requires_grad_(True)
S.requires_grad_(True)
self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
def fn(S, D):
return torch.sparse.mm(S, D)
gradcheck(fn, (S, D), check_sparse_nnz=True)
test_shape(7, 8, 9, 20, False)
test_shape(7, 8, 9, 20, True)
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(TEST_WITH_CROSSREF, "generator unsupport triggers assertion error")
def test_sparse_mul(self, device, dtype, coalesced):
# https://github.com/pytorch/pytorch/issues/79914
a = torch.tensor([[0., 1]], dtype=dtype, device=device).to_sparse().requires_grad_(True)
b = torch.tensor([[0., 1]], dtype=dtype, device=device).to_sparse().requires_grad_(True)
gradcheck(lambda x, y: torch.sparse.sum(x * y).to_dense(), [a, b], check_sparse_nnz=True)
def test_shape(sparse_dims, nnz, with_shape):
a = self._gen_sparse(sparse_dims, nnz, with_shape, dtype, device, coalesced)[0].requires_grad_(True)
b = self._gen_sparse(sparse_dims, nnz, with_shape, dtype, device, coalesced)[0].requires_grad_(True)
self.assertEqual((a * b).to_dense(), a.to_dense() * b.to_dense())
gradcheck(lambda x, y: (x * y).to_dense(), [a, b], check_sparse_nnz=True)
# Issues with 0-dim indices/values
gradcheck(lambda x, y: torch.sparse.sum(x * y).to_dense(), [a, b], check_sparse_nnz=True)
# TODO: Re-enable these
# test_shape(2, 3, [2, 3, 4, 5])
# test_shape(2, 3, [2, 2, 0])
@coalescedonoff
@dtypes(torch.double)
def test_dsmm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
y = self.randn(dj, dk, dtype=dtype, device=device)
res = torch.dsmm(x, y)
expected = torch.mm(self.safeToDense(x), y)
self.assertEqual(res, expected)
test_shape(7, 5, 3, 20)
test_shape(1000, 100, 100, 20)
test_shape(3000, 64, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(1000, 0, 100, 0)
test_shape(1000, 100, 0, 0)
test_shape(1000, 100, 0, 20)
@coalescedonoff
@dtypes(torch.double)
def test_hsmm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
y = self.randn(dj, dk, dtype=dtype, device=device)
res = torch.hsmm(x, y)
expected = torch.mm(self.safeToDense(x), y)
self.assertEqual(res.to_dense(), expected)
test_shape(7, 5, 3, 20)
test_shape(1000, 100, 100, 20)
test_shape(3000, 64, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(1000, 0, 100, 0)
test_shape(1000, 100, 0, 0)
test_shape(1000, 100, 0, 20)
@coalescedonoff
@dtypes(torch.double)
def test_spadd(self, device, dtype, coalesced):
def _test_spadd_shape(nnz, shape_i, shape_v=None):
shape = shape_i + (shape_v or [])
x, _, _ = self._gen_sparse(len(shape_i), nnz, shape, dtype, device, coalesced)
y = self.randn(*shape, dtype=dtype, device=device)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * self.safeToDense(x)
self.assertEqual(res, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = self.randn(*s, dtype=dtype, device=device)
y.transpose_(0, len(s) - 1)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * self.safeToDense(x)
self.assertEqual(res, expected)
x, i, v = self._gen_sparse(len(shape_i), nnz, shape, dtype, device, coalesced)
nnz = i.size(1)
# Non contiguous sparse indices tensor
x_ = self.sparse_tensor(i[:, ::2], v[:(nnz + 1) // 2], x.shape, dtype=dtype, device=device)
res = torch.add(y, x_, alpha=r)
expected = y + r * self.safeToDense(x_)
self.assertEqual(res, expected)
# Non contiguous sparse values tensor
x_ = self.sparse_tensor(i[:, :(nnz + 1) // 2], v[::2], x.shape, dtype=dtype, device=device)
res = torch.add(y, x_, alpha=r)
expected = y + r * self.safeToDense(x_)
self.assertEqual(res, expected)
# Non contiguous sparse indices and values tensors
x_ = self.sparse_tensor(i[:, 1::2], v[1::2], x.shape, dtype=dtype, device=device)
res = torch.add(y, x_, alpha=r)
expected = y + r * self.safeToDense(x_)
self.assertEqual(res, expected)
def _test_spadd():
_test_spadd_shape(10, [5, 6])
_test_spadd_shape(10, [10, 10, 10])
_test_spadd_shape(10, [50, 30, 20])
_test_spadd_shape(10, [5, 5, 5, 5, 5, 5])
_test_spadd_shape(0, [0, 30, 20])
_test_spadd_shape(0, [50, 0, 20])
_test_spadd_shape(0, [50, 30, 0])
def _test_spadd_hybrid():
_test_spadd_shape(10, [5, 6], [2, 3])
_test_spadd_shape(10, [10, 10, 10], [3])
_test_spadd_shape(10, [50, 30, 20], [2])
_test_spadd_shape(10, [5, 5, 5, 5, 5, 5], [2])
_test_spadd_shape(0, [0, 30, 20], [2, 0])
_test_spadd_shape(0, [50, 0, 20], [2, 0])
_test_spadd_shape(0, [50, 30, 0], [2, 0])
_test_spadd_shape(10, [50, 30, 20], [2, 0])
_test_spadd()
_test_spadd_hybrid()
@onlyCUDA
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_add_out_bfloat16(self, device, dtype, coalesced):
# fp32
x, _, _ = self._gen_sparse(3, 5, 10, dtype, device, coalesced)
y, _, _ = self._gen_sparse(3, 5, 10, dtype, device, coalesced)
x = x.float().cuda()
y = y.float().cuda()
res_fp32 = torch.add(x, y)
# bfloat16
x = x.bfloat16()
y = y.bfloat16()
res_bf16 = torch.add(x, y)
res_bf16 = res_bf16.float() # to compare with reference
self.assertEqual(res_fp32, res_bf16, atol=1e-2, rtol=0)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_norm(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
x, _, _ = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
y = x.coalesce()
self.assertEqual(x.norm(), y._values().norm())
test_shape(3, 10, 100)
test_shape(4, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(4, 0, [0, 0, 100, 5, 5, 5, 0])
# Unsupported arguments should error
kwarg_error_pairs = [
({'keepdim': True},
RuntimeError, r'norm_sparse currently does not support keepdim=True'),
({'dim': 0},
RuntimeError, r'norm_sparse currently only supports full reductions'),
({'dtype': torch.double, 'p': 'fro'},
ValueError, r'dtype argument is not supported in frobenius norm'),
({'dtype': torch.double, 'p': 0},
RuntimeError, r"norm_sparse currently does not support 'dtype' argument")
]
x = self._gen_sparse(3, 10, 100, dtype, device, coalesced)[0]
for kwargs, err, msg in kwarg_error_pairs:
with self.assertRaisesRegex(err, msg):
x.norm(**kwargs)
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(TEST_WITH_CROSSREF, "fallback triggers cuda device error")
def test_sparse_sum(self, device, dtype, coalesced):
def run_tests(S, td=None):
D = S.coalesce().to_dense().detach().requires_grad_(True)
if td is None:
S_sum = torch.sparse.sum(S)
D_sum = D.sum()
self.assertEqual(S_sum.item(), D_sum.item())
def fn(S):
res = torch.sparse.sum(S)
if res.is_sparse:
res = res.to_dense()
return res
gradcheck(fn, (S,), check_sparse_nnz=True)
else:
S_sum = torch.sparse.sum(S, td)
D_sum = D.sum(td)
self.assertEqual(S_sum.to_dense() if S_sum.is_sparse else S_sum, D_sum)
def fn(S):
res = torch.sparse.sum(S, td)
if res.is_sparse:
res = res.to_dense()
return res
gradcheck(fn, (S,), check_sparse_nnz=True)
nnz = 10
sparse_dims = 2
with_size = [5, 5, 1, 4] # use a dense dim = 1 to test for squeeze
test_dims = []
for i in range(1, 5):
test_dims += itertools.combinations(range(len(with_size)), i)
# https://github.com/pytorch/pytorch/issues/16501
x = torch.tensor([[1., 0., 0., 1.],
[0., 1., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 2.]], dtype=dtype, device=device).to_sparse()
self.assertEqual(torch.sparse.sum(x, dim=0), torch.sparse.sum(x, dim=-2))
self.assertEqual(torch.sum(x.to_dense(), dim=0), torch.sparse.sum(x, dim=0).to_dense())
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
# dim out of range
self.assertRaises(IndexError, lambda: torch.sparse.sum(S, 5))
# dim 0 appears multiple times in the list of dims
self.assertRaises(RuntimeError, lambda: torch.sparse.sum(S, [0, 0]))
# sum an empty tensor
empty_S = torch.sparse_coo_tensor(size=with_size, dtype=dtype, device=device)
self.assertEqual(torch.sparse.sum(empty_S, [0]).to_dense(), torch.sum(empty_S.to_dense(), [0]))
self.assertEqual(torch.sparse.sum(empty_S), torch.tensor(0, dtype=dtype, device=device))
empty_S.requires_grad_(True)
empty_S_sum = torch.sparse.sum(empty_S)
empty_S_sum.backward()
self.assertEqual(empty_S.grad.to_dense(), empty_S.clone().detach().to_dense())
# test values().sum()
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
run_tests(S.requires_grad_(True))
for test_dim in test_dims:
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
run_tests(S.requires_grad_(True), test_dim)
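# Semantics sketch (an illustration, not from the original file):
# torch.sparse.sum with no dims reduces everything into a dense 0-dim tensor,
# while reducing only a subset of the sparse dims keeps the result sparse,
# which is why run_tests densifies conditionally before comparing.
#
#     s = torch.eye(3).to_sparse()
#     assert torch.sparse.sum(s).item() == 3.0
#     assert torch.sparse.sum(s, [0]).is_sparse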
def _test_basic_ops_shape(self, nnz_x1, nnz_x2, shape_i, shape_v, dtype, device, coalesced):
shape = shape_i + shape_v
x1, _, _ = self._gen_sparse(len(shape_i), nnz_x1, shape, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(len(shape_i), nnz_x2, shape, dtype, device, coalesced)
y1 = x1 + x2
y2 = x1.clone()
y2.add_(x2)
expected = self.safeToDense(x1) + self.safeToDense(x2)
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y1 = x1 - x2
y2 = x1.clone()
y2.sub_(x2)
expected = self.safeToDense(x1) - self.safeToDense(x2)
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y1 = x1 * x2
y2 = x1.clone()
y2.mul_(x2)
expected = self.safeToDense(x1) * self.safeToDense(x2)
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y1 = x1 * 37.5
y2 = x1.clone()
y2.mul_(37.5)
expected = self.safeToDense(x1) * 37.5
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y1 = x1 / 37.5
y2 = x1.clone()
y2.div_(37.5)
expected = self.safeToDense(x1) / 37.5
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y1 = x1 // 37.5
y2 = x1.clone()
y2.floor_divide_(37.5)
expected = self.safeToDense(x1) // 37.5
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
# TODO: add back inplace support
y1 = x1 ** 2
y2 = x1.clone()
y2 = y2.pow(2)
expected = self.safeToDense(x1) ** 2
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y = x1.clone()
y.zero_()
expected = torch.zeros(x1.size(), dtype=dtype, device=device)
self.assertEqual(self.safeToDense(y), expected)
self.assertEqual(x1.is_coalesced(), coalesced)
y = x1.coalesce()
z = x1.coalesce()
self.assertEqual(x1.is_coalesced(), coalesced)
self.assertTrue(y.is_coalesced())
y._values().add_(1)
if not x1.is_coalesced():
# check that coalesce is out of place if the original tensor is not
# coalesced.
self.assertEqual(z._values() + 1, y._values())
else:
# check that coalesce is in-place if the original tensor is
# coalesced.
self.assertEqual(z._values(), y._values())
@coalescedonoff
@dtypes(torch.double)
def test_basic_ops(self, device, dtype, coalesced):
def _test_basic_ops():
self._test_basic_ops_shape(9, 12, [5, 6], [], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [50, 30, 20], [], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [5, 5, 5, 5, 5, 5], [], dtype, device, coalesced)
self._test_basic_ops_shape(0, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_basic_ops_shape(9, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 0], [], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [], [], dtype, device, coalesced)
def _test_basic_ops_hybrid():
self._test_basic_ops_shape(9, 12, [5, 6], [2, 3], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [10, 10, 10], [3], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [50, 30, 20], [2], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [5, 5, 5, 5, 5, 5], [2], dtype, device, coalesced)
self._test_basic_ops_shape(0, 12, [10, 10, 10], [2], dtype, device, coalesced)
self._test_basic_ops_shape(9, 0, [10, 10, 10], [2], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 10], [2], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_basic_ops_shape(0, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_basic_ops_shape(9, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 0], [2, 0], dtype, device, coalesced)
_test_basic_ops()
_test_basic_ops_hybrid()
@dtypes(torch.double, torch.cdouble)
def test_add_dense_sparse_mismatch(self, device, dtype):
def test_shape(dense_size, sparse_dims_shape, dense_dims_shape, sparse_size):
x = torch.zeros(dense_size, dtype=dtype, device=device)
sparse_y = self.sparse_tensor(torch.zeros(sparse_dims_shape, dtype=torch.int64, device=device),
torch.randn(dense_dims_shape, dtype=dtype, device=device),
torch.Size(sparse_size))
with self.assertRaisesRegex(
RuntimeError,
"add: expected 'self' and 'other' to have same size"):
x + sparse_y
test_shape([3, 4], [1, 4], [4, 4, 4], [3, 4, 4])
test_shape([3, 4, 0], [1, 4], [4, 4, 4, 0], [3, 4, 4, 0])
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@dtypes(torch.double, torch.cdouble)
def test_add_noncontiguous(self, device, dtype):
indices = self.index_tensor([[1, 2], [0, 2]], device=device)
values = torch.tensor([1.], dtype=dtype, device=device).expand(2, 3, 4, 5)
x = self.sparse_tensor(indices, values, dtype=dtype, device=device)
assert not x._values().is_contiguous()
y = x + x
expected = self.safeToDense(x) + self.safeToDense(x)
self.assertEqual(self.safeToDense(y), expected)
def _test_sparse_mask_shape(self, nnz_x1, nnz_x2, shape_i, shape_v, dtype, device, coalesced):
shape = shape_i + (shape_v or [])
x1, _, _ = self._gen_sparse(len(shape_i), nnz_x1, shape, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(len(shape_i), nnz_x2, shape, dtype, device, coalesced)
y1 = x1 + x2
y2 = x1.clone()
y2.add_(x2)
expected = self.safeToDense(x1) + self.safeToDense(x2)
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_mask(self, device, dtype, coalesced):
def _test_sparse_mask_fixed():
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
], device=device)
v = torch.tensor([1, 2, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([5, 4]), dtype=dtype, device=device).coalesce()
dense = torch.tensor([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
[17, 18, 19, 20],
], dtype=dtype, device=device)
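# exp_v below reads dense at each (row, col) pair given by x's indices:
# dense[1, 2] = 7, dense[3, 1] = 14, dense[0, 2] = 3, dense[4, 3] = 20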
exp_v = torch.tensor([7, 14, 3, 20], dtype=dtype, device=device)
res_dense_lhs = dense.sparse_mask(x)
sparse = dense.to_sparse()
res_sparse_lhs = sparse.sparse_mask(x)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4]), dtype=dtype, device=device)
self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
# check no side effects for the coalesce flag.
self.assertTrue(sparse.is_coalesced())
self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
], device=device)
v = torch.empty([4, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([5, 4, 0])).coalesce()
dense = torch.empty([5, 4, 0], dtype=dtype, device=device)
exp_v = torch.empty([4, 0], dtype=dtype, device=device)
res_dense_lhs = dense.sparse_mask(x)
sparse = dense.to_sparse(2)
res_sparse_lhs = sparse.sparse_mask(x)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 0]), dtype=dtype, device=device)
self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
# check no side effects for the coalesce flag.
self.assertTrue(sparse.is_coalesced())
self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
_test_sparse_mask_fixed()
self._test_sparse_mask_shape(9, 12, [5, 6], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [50, 30, 20], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 0], [], dtype, device, coalesced)
# check repetitions and matchings in the intersection
lhs = torch.randint(0, 5, (100,), device=device)
rhs = torch.randint(0, 5, (100,), device=device).to_sparse()
self.assertEqual(lhs.to_sparse().sparse_mask(rhs), lhs.sparse_mask(rhs))
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_mask_hybrid(self, device, dtype, coalesced):
def _test_sparse_mask_hybrid_fixed():
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
])
v = torch.tensor([[1, 2], [2, 3], [3, 4], [4, 5]])
# TODO: This is also testing that, if coalesce is a no-op,
# the indices don't get permuted. I don't know if we actually
# want to give this invariant.
x = self.sparse_tensor(i, v, torch.Size([5, 4, 2])).coalesce()
dense = torch.tensor([
[[1, 3], [2, 2], [3, 3], [4, 2]],
[[5, 7], [6, 7], [7, 9], [8, 9]],
[[9, 2], [10, 4], [11, 1], [12, 3]],
[[13, 5], [14, 1], [15, 1], [16, 6]],
[[17, 7], [18, 2], [19, 7], [20, 1]],
])
res_dense_lhs = dense.sparse_mask(x)
sparse = dense.to_sparse(2)
res_sparse_lhs = sparse.sparse_mask(x)
exp_v = torch.tensor([[7, 9], [14, 1], [3, 3], [20, 1]])
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2]))
self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
# check no side effects for the coalesce flag
self.assertTrue(sparse.is_coalesced())
self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
])
v = torch.empty(4, 2, 0)
x = self.sparse_tensor(i, v, torch.Size([5, 4, 2, 0])).coalesce()
dense = torch.empty(5, 4, 2, 0)
res_dense_lhs = dense.sparse_mask(x)
sparse = dense.to_sparse(2)
res_sparse_lhs = sparse.sparse_mask(x)
exp_v = torch.empty(4, 2, 0)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2, 0]))
self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
# check no side effects for the coalesce flag
self.assertTrue(sparse.is_coalesced())
self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
_test_sparse_mask_hybrid_fixed()
self._test_sparse_mask_shape(9, 12, [5, 6], [2, 3], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [10, 10, 10], [3], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [50, 30, 20], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 12, [10, 10, 10], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 0, [10, 10, 10], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 10], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 0], [2, 0], dtype, device, coalesced)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_zeros(self, device, dtype, coalesced):
def _test_zeros(nnzs, shape, out_shape_i, out_shape_v=None):
out_shape = out_shape_i + (out_shape_v or [])
for nnz in nnzs:
out, _, _ = self._gen_sparse(len(out_shape_i), nnz, out_shape, dtype, device, coalesced)
torch.zeros(*shape, out=out, dtype=dtype, device=device)
self.assertEqual(tuple(out.size()), tuple(shape))
self.assertTrue(out._indices().numel() == out._values().numel() == 0)
self.assertEqual(out._nnz(), 0)
self.assertEqual(out.sparse_dim(), len(shape))
self.assertEqual(out.dense_dim(), 0)
def test_shape(i_shapes, v_shapes, shape, nnzs):
for i_dim in range(1, len(i_shapes) + 1):
for v_dim in range(len(v_shapes) + 1):
_test_zeros(nnzs, shape, i_shapes[:i_dim], v_shapes[:v_dim])
test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 4], [9, 12])
test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 4], [0])
test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 4], [9, 12])
test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 0], [9, 12])
test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 0], [0])
test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 0], [9, 12])
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_zeros_like(self, device, dtype, coalesced):
def _test_zeros_like(nnzs, template_shape_i, template_shape_v=None):
template_shape_v = template_shape_v or []
template_shape = template_shape_i + template_shape_v
for nnz in nnzs:
t, _, _ = self._gen_sparse(len(template_shape_i), nnz, template_shape, dtype, device, coalesced)
res = torch.zeros_like(t)
self.assertEqual(tuple(res.size()), tuple(template_shape))
self.assertTrue(res._indices().numel() == res._values().numel() == 0)
self.assertEqual(res._nnz(), 0)
self.assertEqual(res.sparse_dim(), len(template_shape_i))
self.assertEqual(res.dense_dim(), len(template_shape_v))
def test_shape(i_shapes, v_shapes, nnzs):
for i_dim in range(1, len(i_shapes) + 1):
for v_dim in range(len(v_shapes) + 1):
_test_zeros_like(nnzs, i_shapes[:i_dim], v_shapes[:v_dim])
test_shape([2, 3, 4], [3, 4, 5, 6], [9, 12])
test_shape([0, 3, 4], [3, 4, 5, 6], [0])
test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])
test_shape([2, 3, 4], [3, 4, 5, 6], [9, 12])
test_shape([0, 3, 4], [3, 4, 5, 6], [0])
test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])
sparse_tensor, _, _ = self._gen_sparse(len([2, 3]), 9, [2, 3] + [5, 6], dtype, device, coalesced)
data = (sparse_tensor, sparse_tensor, sparse_tensor, sparse_tensor.unsqueeze(0))
mem_formats = [torch.channels_last, torch.contiguous_format, torch.preserve_format, torch.channels_last_3d]
for x, mem_format in zip(data, mem_formats):
with self.assertRaisesRegex(RuntimeError, "memory format option is only supported by strided tensors"):
result = torch.zeros_like(x, memory_format=mem_format)
result = torch.zeros_like(x, layout=torch.strided, memory_format=mem_format)
self.assertTrue(result.layout == torch.strided)
dense_tensor = sparse_tensor.to_dense()
result = torch.zeros_like(dense_tensor, layout=torch.sparse_coo)
self.assertEqual(dense_tensor.shape, result.shape)
self.assertEqual(result.layout, torch.sparse_coo)
sparse_zeros = torch.zeros(dense_tensor.shape, layout=torch.sparse_coo)
self.assertEqual(result._indices().shape, sparse_zeros._indices().shape)
self.assertEqual(result._values().shape, sparse_zeros._values().shape)
def _assert_sparse_invars(self, t):
# SparseTensor has the following invariants:
# - sparse_dim + dense_dim = len(SparseTensor.shape)
# - SparseTensor._indices().shape = (sparse_dim, nnz)
# - SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
self.assertEqual(t.sparse_dim() + t.dense_dim(), len(t.shape))
self.assertEqual(tuple(t._indices().shape), (t.sparse_dim(), t._nnz()))
self.assertEqual(tuple(t._values().shape), (t._nnz(), ) + t.shape[t.sparse_dim():])
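# Concrete reading of the invariants above (hypothetical shapes): a COO
# tensor with shape (5, 4, 2), sparse_dim=2, dense_dim=1 and nnz=3 has
#   _indices().shape == (2, 3)   # (sparse_dim, nnz)
#   _values().shape  == (3, 2)   # (nnz,) + shape[sparse_dim:]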
def _test_empty_like(self, sparse_tensor, dtype, device, coalesced):
result = torch.empty_like(sparse_tensor)
self.assertTrue(result.is_sparse)
self._assert_sparse_invars(result)
self.assertEqual(result.shape, sparse_tensor.shape)
self.assertEqual(result.dtype, sparse_tensor.dtype)
self.assertEqual(result.device, sparse_tensor.device)
self.assertEqual(result.sparse_dim(), sparse_tensor.sparse_dim())
self.assertEqual(result.dense_dim(), sparse_tensor.dense_dim())
sparse_tensor, _, _ = self._gen_sparse(len([2, 3]), 9, [2, 3] + [5, 6], dtype, device, coalesced)
data = (sparse_tensor, sparse_tensor, sparse_tensor, sparse_tensor.unsqueeze(0))
mem_formats = [torch.channels_last, torch.contiguous_format, torch.preserve_format, torch.channels_last_3d]
for x, mem_format in zip(data, mem_formats):
with self.assertRaisesRegex(RuntimeError, "memory format option is only supported by strided tensors"):
result = torch.empty_like(x, memory_format=mem_format)
result = torch.empty_like(x, layout=torch.strided, memory_format=mem_format)
self.assertTrue(result.layout == torch.strided)
with self.assertRaisesRegex(
RuntimeError, r"Could not run 'aten::empty_strided' with arguments from the 'Sparse(CPU|CUDA)' backend"
):
dense_tensor = sparse_tensor.to_dense()
result = torch.empty_like(dense_tensor, layout=torch.sparse_coo)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_empty_like(self, device, dtype, coalesced):
# tests https://github.com/pytorch/pytorch/issues/43699
if coalesced:
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0, 1, 2]]),
values=torch.tensor([3.0, -4.0, 5.0]),
size=[3, ],
dtype=dtype,
device=device
).coalesce()
self._test_empty_like(input_coalesced, dtype, device, coalesced)
# hybrid sparse input
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[1, 3], [2, 4]]),
values=torch.tensor([[-1.0, 3.0], [-5.0, 7.0]]),
size=[4, 5, 2],
dtype=dtype,
device=device
).coalesce()
self._test_empty_like(input_coalesced, dtype, device, coalesced)
if not coalesced:
# test uncoalesced input
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
values=torch.tensor([2.0, -3.0, -4.0, 1.0, -1.0, 1.5]),
size=[3, ],
dtype=dtype,
device=device
)
self._test_empty_like(input_uncoalesced, dtype, device, coalesced)
# test on empty sparse tensor
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.zeros([2, 0]),
values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
dtype=dtype,
device=device
)
self._test_empty_like(input_uncoalesced, dtype, device, coalesced)
def _test_narrow(self, input, narrow_args):
expected = input.to_dense().narrow(*narrow_args)
self.assertEqual(expected, input.narrow_copy(*narrow_args).to_dense())
def _all_narrow_combs(self, shape):
for dim, dim_sz in enumerate(shape):
for start in range(dim_sz):
for length in range(dim_sz - start):
yield [dim, start, length]
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_narrow(self, device, dtype, coalesced):
shape = [3, 3, 4, 2]
input, _, _ = self._gen_sparse(4, 19, shape, dtype, device, coalesced)
for narrow_args in self._all_narrow_combs(shape):
self._test_narrow(input, narrow_args)
self.assertRaises(RuntimeError, lambda: input.narrow_copy(-1, 0, 3)) # dim < 0
self.assertRaises(RuntimeError, lambda: input.narrow_copy(10, 0, 3)) # dim > input.dim()
self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, shape[0] + 1, 3)) # start > size of dim
self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, 2, shape[0])) # start+length > size of dim
with_dense, _, _ = self._gen_sparse(2, 7, shape, dtype, device, coalesced)
for narrow_args in self._all_narrow_combs(shape):
self._test_narrow(with_dense, narrow_args)
self.assertRaises(RuntimeError, lambda: with_dense.narrow_copy(10, 0, 3)) # dim > sparseDim + denseDim
def _test_log1p_tensor(self, sparse_tensor, coalesced):
def is_integral(dtype):
return dtype in integral_types()
dense_tensor = sparse_tensor.to_dense()
expected_output = dense_tensor.log1p()
is_integral_dtype = is_integral(sparse_tensor.dtype)
self.assertEqual(expected_output, sparse_tensor.log1p().to_dense())
if is_integral_dtype:
with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
sparse_tensor.coalesce().log1p_()
else:
self.assertEqual(expected_output, sparse_tensor.coalesce().log1p_().to_dense())
if not coalesced:
# test in-place op on uncoalesced input
with self.assertRaisesRegex(RuntimeError, "log1p_ requires coalesced input"):
sparse_tensor.log1p_()
if is_integral_dtype:
with self.assertRaisesRegex(RuntimeError, "only Tensors of floating point dtype can require gradients"):
sparse_tensor.requires_grad_()
@coalescedonoff
@dtypes(*all_types())
def test_log1p(self, device, dtype, coalesced):
if coalesced:
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2]]).transpose(1, 0),
values=torch.tensor([3.0, 4.0, 5.0]),
size=[3, ],
device=device,
dtype=dtype
).coalesce()
self._test_log1p_tensor(input_coalesced, coalesced)
# hybrid sparse input
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[1, 3], [2, 4]]),
values=torch.tensor([[1.0, 3.0], [5.0, 7.0]]),
size=[4, 5, 2],
device=device,
dtype=dtype
).coalesce()
self._test_log1p_tensor(input_coalesced, coalesced)
if not coalesced:
# test uncoalesced input
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
values=torch.tensor([2.0, 3.0, 4.0, 1.0, 1.0, 1.0]),
size=[3, ],
device=device,
dtype=dtype
)
self._test_log1p_tensor(input_uncoalesced, coalesced)
# test on empty sparse tensor
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.zeros([2, 0]),
values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
device=device,
dtype=dtype
)
# empty tensors are coalesced at creation (nnz < 2), so we must force the uncoalesced state
input_uncoalesced._coalesced_(False)
self._test_log1p_tensor(input_uncoalesced, coalesced)
def _test_neg_negative(self, sparse_tensor):
dense_tensor = sparse_tensor.to_dense()
expected_output = dense_tensor.neg()
ops = (
torch.neg, torch.Tensor.neg, torch.Tensor.neg_,
torch.negative, torch.Tensor.negative, torch.Tensor.negative_,
operator.neg
)
for op in ops:
sparse_tensor_copy = sparse_tensor.clone()
self.assertEqual(expected_output, op(sparse_tensor_copy).to_dense())
if op in (torch.neg, torch.negative):
sparse_tensor_out = torch.zeros_like(sparse_tensor)
op(sparse_tensor, out=sparse_tensor_out)
self.assertEqual(expected_output, sparse_tensor_out.to_dense())
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_neg_negative(self, device, dtype, coalesced):
if coalesced:
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0, 1, 2]]),
values=torch.tensor([3.0, -4.0, 5.0]),
size=[3, ],
dtype=dtype,
device=device
).coalesce()
self._test_neg_negative(input_coalesced)
# hybrid sparse input
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[1, 3], [2, 4]]),
values=torch.tensor([[-1.0, 3.0], [-5.0, 7.0]]),
size=[4, 5, 2],
dtype=dtype,
device=device
).coalesce()
self._test_neg_negative(input_coalesced)
if not coalesced:
# test uncoalesced input
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
values=torch.tensor([2.0, -3.0, -4.0, 1.0, -1.0, 1.5]),
size=[3, ],
dtype=dtype,
device=device
)
self._test_neg_negative(input_uncoalesced)
# test on empty sparse tensor
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.zeros([2, 0]),
values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
dtype=dtype,
device=device
)
self._test_neg_negative(input_uncoalesced)
def _test_asin_arcsin(self, sparse_tensor, coalesced):
def is_integral(dtype):
return dtype in integral_types()
is_integral_dtype = is_integral(sparse_tensor.dtype)
dense_tensor = sparse_tensor.to_dense()
expected_output = dense_tensor.asin()
ops = (
torch.asin, torch.Tensor.asin,
torch.arcsin, torch.Tensor.arcsin,
)
for op in ops:
self.assertEqual(expected_output, op(sparse_tensor).to_dense())
if op in (torch.asin, torch.arcsin):
sparse_tensor_out = torch.zeros_like(sparse_tensor)
if not is_integral_dtype:
op(sparse_tensor, out=sparse_tensor_out)
self.assertEqual(expected_output, sparse_tensor_out.to_dense())
else:
with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
op(sparse_tensor, out=sparse_tensor_out)
for op in (torch.Tensor.asin_, torch.Tensor.arcsin_):
if is_integral_dtype:
# test coalesce on integral dtype tensor
with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
op(sparse_tensor.clone().coalesce()).to_dense()
else:
self.assertEqual(expected_output, op(sparse_tensor.clone().coalesce()).to_dense())
if not coalesced:
# test in-place op on uncoalesced input
with self.assertRaisesRegex(RuntimeError, "asin_ requires coalesced input"):
op(sparse_tensor)
@coalescedonoff
@dtypes(*all_types())
def test_asin_arcsin(self, device, dtype, coalesced):
if coalesced:
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0, 1, 2, 3]]),
values=torch.tensor([0.5, -0.5, 0.7, -0.7]),
size=[4, ],
dtype=dtype,
device=device
).coalesce()
self._test_asin_arcsin(input_coalesced, coalesced)
# hybrid sparse input
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[1, 3], [2, 4]]),
values=torch.tensor([[-0.1, 0.24], [-0.44, 0.1]]),
size=[4, 5, 2],
dtype=dtype,
device=device
).coalesce()
self._test_asin_arcsin(input_coalesced, coalesced)
if not coalesced:
# test uncoalesced input
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
values=torch.tensor([0.3, -0.3, -0.4, 0.3, -0.5, 0.15]),
size=[3, ],
dtype=dtype,
device=device
)
self._test_asin_arcsin(input_uncoalesced, coalesced)
# test on empty sparse tensor
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.zeros([2, 0]),
values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
dtype=dtype,
device=device
)
# empty tensors are coalesced at creation (nnz < 2), so we must force the uncoalesced state
input_uncoalesced._coalesced_(False)
self._test_asin_arcsin(input_uncoalesced, coalesced)
@coalescedonoff
@dtypes(torch.double)
def test_mv(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x, _, _ = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)
t = torch.randn(dk, dtype=dtype, device=device)
res = x.matmul(t)
expected = self.safeToDense(x).matmul(t)
self.assertEqual(res, expected)
test_shape(10, 100, 100, 20)
test_shape(100, 1000, 1000, 20)
test_shape(64, 10000, 10000, 20)
test_shape(0, 100, 100, 0)
test_shape(10, 0, 0, 0)
test_shape(10, 100, 100, 0)
test_shape(10, 100, 100, 20)
with self.assertRaisesRegex(RuntimeError, r"mv: expected self\.size\(-1\) == vec\.size\(-1\)"):
test_shape(10, 100, 10, 20)
with self.assertRaisesRegex(RuntimeError, "mv: two tensor dim should be 2 and 1"):
x, _, _ = self._gen_sparse(2, 20, [10, 100], dtype, device, coalesced)
y, _, _ = self._gen_sparse(2, 20, [10, 100], dtype, device, coalesced)
res = x.mv(y)
@dtypes(*floating_and_complex_types())
def test_sparse_add_coalesce(self, device, dtype):
i = self.index_tensor([[1, 2, 1]], device=device)
v = torch.tensor([3, 4, 5], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3]))
y = self.sparse_tensor(i, v, torch.Size([3]))
z = x + y
self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())
i = self.index_tensor([[1, 2, 1]], device=device)
v = torch.empty([3, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 0]))
y = self.sparse_tensor(i, v, torch.Size([3, 0]))
z = x + y
self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())
@onlyCUDA
def test_storage_not_null(self):
x = torch.cuda.sparse.FloatTensor(2)
self.assertNotEqual(x.get_device(), -1)
x = torch.cuda.sparse.FloatTensor(2, 0)
self.assertNotEqual(x.get_device(), -1)
@onlyCUDA
@deviceCountAtLeast(2)
def test_same_gpu(self, devices):
def check_device(x, device_id):
self.assertEqual(x.get_device(), device_id)
self.assertEqual(x._values().get_device(), device_id)
self.assertEqual(x._indices().get_device(), device_id)
dev1, dev2 = devices[0], devices[1]
i = self.index_tensor([[2]], device=dev2)
v = torch.tensor([5], device=dev2)
x = self.sparse_tensor(i, v, torch.Size([3]), device=1)
check_device(x, 1)
i = self.index_tensor([[2]], device=dev2)
v = torch.empty(1, 0, device=dev2)
x = self.sparse_tensor(i, v, torch.Size([3, 0]), device=1)
check_device(x, 1)
x = self.sparse_empty(3, device=1)
check_device(x, 1)
x = self.sparse_empty(3, 0, device=1)
check_device(x, 1)
i = self.index_tensor([[2]], device=dev2)
v = torch.tensor([5], device=dev1)
# NB: non-legacy constructor allows this and moves indices
self.assertRaises(RuntimeError, lambda: self.legacy_sparse_tensor(i, v, torch.Size([3])))
i = self.index_tensor([[2]], device=dev2)
v = torch.empty(1, 0, device=dev1)
# NB: non-legacy constructor allows this and moves indices
self.assertRaises(RuntimeError, lambda: self.legacy_sparse_tensor(i, v, torch.Size([3, 0])))
def _test_new_device(self, size, device):
with torch.cuda.device(device):
x = torch.cuda.sparse.DoubleTensor(*size)
self.assertEqual(x.get_device(), device)
x1 = x.new()
x2 = x.new(2, 3)
self.assertEqual(x1.get_device(), device)
self.assertEqual(x2.get_device(), device)
@onlyCUDA
def test_new_device_single_gpu(self):
self._test_new_device((), 0)
self._test_new_device((30, 20), 0)
self._test_new_device((30, 20, 10), 0)
self._test_new_device((30, 20, 10, 0), 0)
@onlyCUDA
@unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
def test_new_device_multi_gpu(self):
self._test_new_device((), 1)
self._test_new_device((30, 20), 1)
self._test_new_device((30, 20, 10), 1)
self._test_new_device((30, 20, 10, 0), 1)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_new(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
x, indices, values = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
if not x.is_cuda:
# CUDA sparse tensors currently require the size to be
# specified if nDimV > 0
out = x.new(indices, values).coalesce()
x_c = x.coalesce()
self.assertEqual((out.indices(), out.values()), (x_c.indices(), x_c.values()))
self.assertEqual(x.new(indices, values, x.size()), x)
test_shape(3, 10, 100)
test_shape(3, 0, [100, 100, 0])
@onlyCPU # not really, but we only really want to run this once
@dtypes(torch.float64, torch.float32, torch.float16, torch.cfloat, torch.cdouble)
def test_factory(self, device, dtype):
for test_empty_tensor in [True, False]:
if test_empty_tensor:
default_size = torch.Size([1, 3, 0])
size = torch.Size([3, 3, 0])
else:
default_size = torch.Size([1, 3])
size = torch.Size([3, 3])
for include_size in [True, False]:
for use_tensor_idx in [True, False]:
for use_tensor_val in [True, False]:
for use_cuda in ([False] if not torch.cuda.is_available() else [True, False]):
# have to include size with cuda sparse tensors
include_size = include_size or use_cuda
long_dtype = torch.int64
device = torch.device('cpu') if not use_cuda else \
torch.device(torch.cuda.device_count() - 1)
indices = torch.tensor(([0], [2]), dtype=long_dtype) if use_tensor_idx else ([0], [2])
if test_empty_tensor:
values = torch.empty(1, 0).to(dtype)
else:
if use_tensor_val:
values = torch.tensor([1.], dtype=dtype)
else:
values = 1.
if include_size:
sparse_tensor = torch.sparse_coo_tensor(indices, values, size, dtype=dtype,
device=device, requires_grad=True)
else:
sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=dtype,
device=device, requires_grad=True)
self.assertEqual(indices, sparse_tensor._indices())
self.assertEqual(values, sparse_tensor._values())
self.assertEqual(size if include_size else default_size, sparse_tensor.size())
self.assertEqual(dtype, sparse_tensor.dtype)
if use_cuda:
self.assertEqual(device, sparse_tensor._values().device)
self.assertEqual(True, sparse_tensor.requires_grad)
@dtypes(torch.double, torch.cdouble)
def test_factory_size_check(self, device, dtype):
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.tensor([.5, .5], dtype=dtype, device=device)
sizes = torch.Size([2, 3])
with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices.fill_(-1)
with self.assertRaisesRegex(RuntimeError, "found negative index"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.empty([2, 1, 0], dtype=dtype, device=device)
sizes = torch.Size([2, 3, 1, 0])
with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.empty([2, 2, 2], dtype=dtype, device=device)
sizes = torch.Size([0, 0, 2, 2])
with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.tensor([[1, 1, 1], [1, 1, 1]], dtype=dtype, device=device)
sizes = torch.Size([3, 3, 2])
with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.empty([2, 1, 0], dtype=dtype, device=device)
sizes = torch.Size([3, 3, 2, 0])
with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
def test_factory_default(self, device):
tensor = self.legacy_sparse_tensor()
expected_indices = self.index_tensor([[]], device=device)
expected_size = torch.Size([0])
self.assertEqual(tensor._indices(), expected_indices)
self.assertEqual(tensor.shape, expected_size)
def test_factory_empty_indices(self, device):
tensor = self.legacy_sparse_tensor()
expected_indices = torch.empty((1, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 0]), device=device)
expected_indices = torch.empty((2, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0]), device=device)
expected_indices = torch.empty((3, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0, 0]), device=device)
expected_indices = torch.empty((4, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
@dtypes(torch.double, torch.cdouble)
def test_factory_nnz(self, device, dtype):
indices = self.index_tensor([[0]], device=device) # (sparse_dim, nnz): (1, 1)
values = torch.tensor([[1, 1], [1, 1]], dtype=dtype, device=device) # (nnz, ...): (2, 2)
sizes = torch.Size([2, 2])
with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[0]], device=device) # (sparse_dim, nnz): (1, 1)
values = torch.empty([2, 0], dtype=dtype, device=device) # (nnz, ...): (2, 0)
sizes = torch.Size([2, 0])
with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
@dtypes(torch.double, torch.cdouble)
def test_factory_nnz_zero(self, device, dtype):
def test_shape(i_shape, v_shape, size, expected_size):
if size:
t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), torch.Size(size),
dtype=dtype, device=device)
else:
t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), dtype=dtype, device=device)
expected_indices = torch.empty(i_shape, device=device, dtype=torch.int64)
expected_values = torch.empty(v_shape, device=device, dtype=dtype)
expected_size = torch.Size(expected_size)
self.assertEqual(t._indices(), expected_indices)
self.assertEqual(t._values(), expected_values)
self.assertEqual(t.size(), expected_size)
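# When size is None, the factory infers it as one leading zero per sparse
# dim (the empty indices bound nothing) followed by the trailing dense
# dims of values; e.g. i_shape [3, 0] with v_shape [0, 2, 4, 0] infers
# [0, 0, 0, 2, 4, 0], as exercised below.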
test_shape([1, 0], [0, 2, 4, 0], None, [0, 2, 4, 0])
test_shape([3, 0], [0, 2, 4, 0], None, [0, 0, 0, 2, 4, 0])
test_shape([1, 0], [0, 2, 4, 0], [0, 2, 4, 0], [0, 2, 4, 0])
test_shape([3, 0], [0, 2, 4, 0], [0, 0, 0, 2, 4, 0], [0, 0, 0, 2, 4, 0])
test_shape([3, 0], [0, 2, 4, 0], [1, 2, 3, 2, 4, 0], [1, 2, 3, 2, 4, 0])
@dtypes(torch.double, torch.cdouble)
def test_factory_dense_dim(self, device, dtype):
indices = self.index_tensor([[0]], device=device)
values = torch.tensor([[[1, 1, 1], [1, 1, 1]]], dtype=dtype, device=device)
sizes = torch.Size([1, 3, 4])
with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
torch.sparse_coo_tensor(indices, values, sizes)
indices = self.index_tensor([[0]], device=device)
values = torch.empty([1, 2, 3, 0], dtype=dtype, device=device)
sizes = torch.Size([1, 3, 4, 0])
with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
torch.sparse_coo_tensor(indices, values, sizes)
@onlyCPU
@dtypes(torch.float16, torch.float32, torch.float64, torch.cfloat, torch.cdouble, torch.int64)
def test_factory_type_inference(self, device, dtype):
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1.], dtype=dtype))
self.assertEqual(dtype, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1]))
self.assertEqual(torch.int64, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.HalfTensor(1, 0))
self.assertEqual(torch.float16, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.FloatTensor(1, 0))
self.assertEqual(torch.float32, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.DoubleTensor(1, 0))
self.assertEqual(torch.float64, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.LongTensor(1, 0))
self.assertEqual(torch.int64, t.dtype)
@onlyCUDA
def test_factory_device_type_inference(self, device):
# both indices/values are CUDA
cpu_cuda = ('cpu', 'cuda')
cpu_cuda_none = cpu_cuda + (None,)
for indices_device, values_device, device in itertools.product(cpu_cuda,
cpu_cuda,
cpu_cuda_none):
indices = torch.tensor(([0], [2]), device=indices_device)
values = torch.tensor([1.], device=values_device)
empty_values = torch.empty(1, 0).to(values_device)
shape = (1, 3)
empty_shape = (1, 3, 0)
if device is None and indices_device != values_device:
with self.assertRaises(RuntimeError):
torch.sparse_coo_tensor(indices, values, shape, device=device)
with self.assertRaises(RuntimeError):
torch.sparse_coo_tensor(indices, empty_values, empty_shape, device=device)
else:
t = torch.sparse_coo_tensor(indices, values, shape, device=device)
t_empty = torch.sparse_coo_tensor(indices, empty_values, empty_shape, device=device)
should_be_cuda = (device == 'cuda' or (device is None and values_device == 'cuda'))
self.assertEqual(should_be_cuda, t.is_cuda)
self.assertEqual(t.is_cuda, t_empty.is_cuda)
@onlyCPU
def test_factory_copy(self, device):
def test_tensor(indices, values, indices_equal, values_equal):
sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=torch.float64, device=device)
if indices_equal:
self.assertEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
else:
self.assertNotEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
if values_equal:
self.assertEqual(values.data_ptr(), sparse_tensor._values().data_ptr())
else:
self.assertNotEqual(values.data_ptr(), sparse_tensor._values().data_ptr())
# both correct
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.tensor([1.], dtype=torch.float64)
test_tensor(indices, values, True, True)
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.DoubleTensor(1, 0)
test_tensor(indices, values, True, True)
# only indices correct
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.tensor([1.], dtype=torch.float32)
test_tensor(indices, values, True, False)
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.tensor([1.], dtype=torch.float16)
test_tensor(indices, values, True, False)
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.FloatTensor(1, 0)
test_tensor(indices, values, True, True) # An empty tensor's data_ptr is always equal to 0
# only values correct
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = torch.tensor([1.], dtype=torch.float64)
test_tensor(indices, values, False, True)
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = torch.DoubleTensor(1, 0)
test_tensor(indices, values, False, True)
# neither correct
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = torch.tensor([1.], dtype=torch.float32)
test_tensor(indices, values, False, False)
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = torch.FloatTensor(1, 0)
test_tensor(indices, values, False, True) # An empty tensor's data_ptr is always equal to 0
# complex support
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = make_tensor([1, ], dtype=torch.cdouble, device=device)
test_tensor(indices, values, True, False)
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = make_tensor([1, 1], dtype=torch.cdouble, device=device)
test_tensor(indices, values, False, False)
@onlyCPU # just run once, we test both cpu and cuda
def test_constructor_device_legacy(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, size, device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))
x = torch.sparse_coo_tensor(i, v, size, device='cpu')
self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
if torch.cuda.is_available():
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, size, device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))
x = torch.sparse_coo_tensor(i, v, size, device='cuda')
self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
def test_legacy_constructor(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor(v.storage()))
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor(v))
self.assertEqual(torch.sparse_coo, torch.sparse.FloatTensor(torch.Size([2, 3])).layout)
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor([6]))
def test_legacy_new(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
s = torch.sparse_coo_tensor(i, v, size)
self.assertEqual(torch.sparse_coo, s.new(device='cpu').layout)
self.assertRaises(TypeError, lambda: s.new(v.storage()))
self.assertRaises(TypeError, lambda: s.new(v))
self.assertEqual(torch.sparse_coo, s.new(torch.Size([2, 3])).layout)
self.assertRaises(TypeError, lambda: s.new([6]))
@onlyCPU # not really, but we only really want to run this once
def test_dtypes(self, device):
all_sparse_dtypes = all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)
do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
if torch.cuda.is_available():
do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
@onlyCPU # not really, but we only really want to run this once
def test_empty_full(self, device):
all_sparse_dtypes = all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
if torch.cuda.device_count() > 0:
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, None)
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
def test_is_sparse(self, device):
x = torch.randn(3, 3)
self.assertFalse(x.is_sparse)
x = torch.randn(3, 3, 0)
self.assertFalse(x.is_sparse)
x = self.legacy_sparse_tensor()
self.assertTrue(x.is_sparse)
x = self.sparse_empty(1, 0, device=device)
self.assertTrue(x.is_sparse)
def test_resize_as(self, device):
def do_test(t):
y = t.new().resize_as_(t).zero_()
self.assertEqual(y.shape, t.shape)
# Check that y can be added to t. Currently, this requires that
# sparse_dim and dense_dim match.
self.assertEqual(t, t + y)
do_test(self.legacy_sparse_tensor())
do_test(self.sparse_empty([3, 0], device=device))
do_test(self.sparse_empty([3, 3], device=device))
def _test_resize_shape(self, x_i, x_v, x_size, y_i, y_v, y_size, dtype, device):
x_v_numel = torch.zeros(x_v).numel()
y_v_numel = torch.zeros(y_v).numel()
x = torch.sparse_coo_tensor(torch.zeros(x_i),
torch.arange(x_v_numel).resize_(x_v).to(torch.float),
torch.Size(x_size), dtype=dtype, device=device)
x_dense = x.to_dense()
y = torch.sparse_coo_tensor(torch.zeros(y_i),
torch.ones(y_v).to(torch.float),
torch.Size(y_size), dtype=dtype, device=device)
y_dense = y.to_dense()
x.resize_as_(y)
x_dense.resize_as_(y_dense)
self.assertEqual(x.shape, y.shape)
self.assertEqual(x.sparse_dim(), y.sparse_dim())
self.assertEqual(x.dense_dim(), y.dense_dim())
self.assertEqual(x.shape, x_dense.shape)
self.assertEqual(y.shape, y_dense.shape)
# Here we make sure that the original data are preserved after resizing
self.assertEqual(x.to_dense().view(-1)[0:x_v_numel].view(x_v),
x_dense.view(-1)[0:x_v_numel].view(x_v))
@dtypes(torch.double, torch.cdouble)
def test_resize(self, device, dtype):
# 1. Expand the size of some dense dimensions [Supported]
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 4], [2, 2, 4],
dtype=dtype, device=device)
self._test_resize_shape([1, 1], [1, 2, 0], [2, 2, 0],
[1, 1], [1, 2, 4], [2, 2, 4],
dtype=dtype, device=device)
# 2. Expand the size of some sparse dimensions [Supported]
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 3], [4, 2, 3],
dtype=dtype, device=device)
# 3. Change the shapes of both sparse and dense dimensions when nnz is zero [Supported]
self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
[2, 0], [0, 2, 4, 5], [1, 1, 2, 4, 5],
dtype=dtype, device=device)
self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
[2, 0], [0, 2, 4, 0], [1, 1, 2, 4, 0],
dtype=dtype, device=device)
# 4. Add dims to dense dimensions [Not Supported]
with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 3, 4], [2, 2, 3, 4],
dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 3, 0], [2, 2, 3, 0],
dtype=dtype, device=device)
# 5. Remove dims from dense dimensions [Not Supported]
with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2], [2, 2],
dtype=dtype, device=device)
# 6. Change the number of sparse dimensions on a non-empty sparse tensor [Not Supported]
with self.assertRaisesRegex(RuntimeError, "changing the number of sparse dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[2, 1], [1, 2, 3], [1, 2, 2, 3],
dtype=dtype, device=device)
# 7. Shrink the size of some sparse dimensions on a non-empty sparse tensor [Not Supported]
with self.assertRaisesRegex(RuntimeError, "shrinking the size of sparse dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 3], [1, 2, 3],
dtype=dtype, device=device)
# 8. Shrink the size of some dense dimensions on a non-empty sparse tensor [Not Supported]
with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 2], [2, 2, 2],
dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 0], [2, 2, 0],
dtype=dtype, device=device)
def test_is_nonzero(self, device):
self.assertTrue(torch.sparse_coo_tensor(([0],), 1., (1,), device=device).is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0],), 0., (1,), device=device).is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0], [0]), 0., (1, 1), device=device).is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (0., 0.), (1,), device=device).is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (-1., 1.), (1,), device=device).is_nonzero())
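# the two entries above share index 0, so coalescing sums -1. and 1. to 0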
# scalar sparse tensor
self.assertTrue(torch.sparse_coo_tensor(torch.zeros(0, 1), 12.3, [], device=device).is_nonzero())
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch.sparse_coo_tensor(([0, 1],), torch.empty(2, 0), (4, 0), device=device).is_nonzero()
self.assertTrue(torch.sparse_coo_tensor(([0],), 2.3 - 4.5j, (1,), dtype=torch.cfloat, device=device)
.is_nonzero())
self.assertTrue(torch.sparse_coo_tensor(([0],), 2.3 - 4.5j, (1,), dtype=torch.cdouble, device=device)
.is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0],), 0. + 0j, (1,), dtype=torch.cfloat, device=device)
.is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0],), 0. + 0j, (1,), dtype=torch.cdouble, device=device)
.is_nonzero())
@dtypes(torch.double, torch.cdouble)
def test_change_tensor_metadata(self, device, dtype):
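# The constructor must not alias the caller's tensors: after t is built,
# in-place changes to i and v (resize_, set_, as_strided_, transpose_)
# should leave t's coalesced indices/values sizes unchanged.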
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]), dtype=dtype, device=device)
i.resize_(2, 3)
v.resize_(4, 5)
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
i.resize_as_(self.index_tensor([0, 1], device=device))
v.resize_as_(torch.tensor([3, 4, 5], dtype=dtype, device=device))
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
i.as_strided_((2, 1), (1, 1))
v.as_strided_((1, 3), (1, 1))
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
i.set_(self.index_tensor([0, 1], device=device))
v.set_(torch.tensor([3, 4, 5], dtype=dtype, device=device))
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
i.transpose_(0, 1)
v.transpose_(0, 1)
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
@coalescedonoff
@dtypes(torch.double)
def test_pickle(self, device, dtype, coalesced):
import pickle
shape_sparse_dim_nnz = [
((), 0, 2),
((0,), 0, 10),
((2,), 0, 3),
((100, 3), 1, 3),
((100, 20, 3), 2, 0),
((10, 0, 3), 0, 3),
((10, 0, 3), 0, 0),
]
for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
indices_shape = torch.Size((sparse_dim, nnz))
values_shape = torch.Size((nnz,) + shape[sparse_dim:])
indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
device=device).view(indices_shape)
for d in range(sparse_dim):
indices[d].clamp_(max=(shape[d] - 1)) # make it valid index
if not coalesced and indices.numel() > 0:
indices[:, -1] = indices[:, 0] # make it uncoalesced
values_numel = values_shape.numel()
values = torch.arange(values_numel, dtype=dtype,
device=device).view(values_shape).div_(values_numel / 2.)
sp_tensor = self.sparse_tensor(indices, values, shape)
serialized = pickle.dumps(sp_tensor)
sp_tensor_loaded = pickle.loads(serialized)
self.assertEqual(sp_tensor, sp_tensor_loaded)
def test_any(self, device):
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([False, False]), device=device)
t_any = torch.tensor(False)
self.assertEqual(torch.any(t), t_any)
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([True, False]), device=device)
t_any = torch.tensor(True)
self.assertEqual(torch.any(t), t_any)
def test_isnan(self, device):
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])), torch.tensor([1, 4]), device=device)
t_nan = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])), torch.tensor([False, False]), device=device)
self.assertEqual(torch.isnan(t).int(), t_nan.int())
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])), torch.tensor([1, float("nan")]), device=device)
t_nan = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])), torch.tensor([False, True]), device=device)
self.assertEqual(torch.isnan(t).int(), t_nan.int())
@coalescedonoff
@dtypes(torch.float32, torch.float64)
def test_div_rounding_mode(self, device, dtype, coalesced):
sparse, _, _ = self._gen_sparse(2, 10, (10, 10), dtype,
device, coalesced)
dense = self.safeToDense(sparse)
for mode in (None, 'floor', 'trunc'):
actual = sparse.div(-2, rounding_mode=mode)
expect = dense.div(-2, rounding_mode=mode)
self.assertEqual(self.safeToDense(actual), expect)
# Test inplace
actual = sparse.clone().div_(-2, rounding_mode=mode)
self.assertEqual(self.safeToDense(actual), expect)
# Test out argument
actual.zero_()
torch.div(sparse, -2, rounding_mode=mode, out=actual)
self.assertEqual(self.safeToDense(actual), expect)
def test_div_by_sparse_error(self, device):
self.assertRaisesRegex(RuntimeError, 'Sparse division requires',
lambda: torch.tensor(1., device=device).to_sparse()
/ torch.tensor(1., device=device).to_sparse())
def test_floor_divide_by_sparse_error(self, device):
self.assertRaisesRegex(RuntimeError, 'Sparse floor division requires',
lambda: torch.tensor(1., device=device).to_sparse()
// torch.tensor(1., device=device).to_sparse())
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
@onlyCPU
def test_sparse_to_numpy(self, device):
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([1, 4]))
self.assertRaises(TypeError, lambda: t.numpy())
@coalescedonoff
@dtypes(torch.double)
def test_softmax(self, device, dtype, coalesced):
import torch.nn.functional as F
def to_dense(sparse, fill_value=None):
"""
Return a dense tensor from a sparse tensor using the given fill value.
"""
if fill_value is None or fill_value == 0:
return sparse.to_dense()
sparse = sparse.coalesce()
dense = torch.full(sparse.shape, fill_value, dtype=sparse.dtype, device=sparse.device)
for idx, value in zip(sparse._indices().t(), sparse._values()):
dense[tuple(idx)] = value
return dense
def softmax_to_dense(sparse, dim):
"""Dense softmax of a sparse tensor. Useful only for testing softmax
correctness.
When computing softmax of a sparse tensor, the value of
unspecified items is negative infinity rather than zero so
that
softmax(sparse.to_dense(fill_value=-inf), dim) == softmax(sparse, dim).to_dense()
holds for non-empty lines. On empty lines, the softmax
values are defined as 0 in order to preserve the sparsity
of the result.
Note that in PyTorch, ``to_dense`` method does not
implement the ``fill_value`` keyword argument.
"""
dtype = sparse.dtype
device = sparse.device
dense = to_dense(sparse, fill_value=-float('inf'))
r = F.softmax(dense, dim)
# softmax on empty lines results in nan; replace with zeros to match the definition
r[r != r] = 0
return r
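# Worked example on hypothetical data: for a 1-D sparse tensor of size 3
# whose only specified entry is x[1] = 0.5, the -inf fill gives
# [-inf, 0.5, -inf]; softmax of that is [0, 1, 0], which matches
# torch.sparse.softmax(x, 0).to_dense().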
def sparse_softmax(sparse, dim):
"""Pure Python softmax of a sparse tensor. Assuming -inf for
unspecified sparse tensor data. This is a prototype of
the sparse softmax algorithm in Python.
"""
dtype = sparse.dtype
device = sparse.device
# softmax is a non-linear operation, so sparse tensors must
# be coalesced.
sparse = sparse.coalesce()
inf = float('inf')
indices = sparse._indices()
values = sparse._values()
if dim < sparse.sparse_dim():
nnz = sparse._nnz()
# compute pool indices
size = sparse.size()
strides = torch.ones((sparse.sparse_dim(), 1), dtype=indices.dtype, device=indices.device)
for i in reversed(range(sparse.sparse_dim() - 1)):
strides[i, 0] = strides[i + 1, 0] * size[i + 1]
strides[dim, 0] = 0
pool = (indices * strides).sum(dim=0)
i2p = {}
for i in range(nnz):
c = int(pool[i])
if c not in i2p:
i2p[c] = len(i2p)
pool[i] = i2p[c]
# compute max
dense_size = tuple(size[sparse.sparse_dim():])
mx = torch.empty((pool.max() + 1,) + dense_size, dtype=dtype, device=device)
mx[:] = -inf
for n in range(nnz):
p = pool[n]
mx[p] = torch.max(mx[p], values[n])
# apply exp to (v - mx) and sum the results
exp_values = torch.empty_like(values)
exp_sums = torch.zeros_like(mx)
for n in range(nnz):
p = pool[n]
v = exp_values[n] = (values[n] - mx[p]).exp()
exp_sums[p] = exp_sums[p] + v
# normalize with the sum of exponents
for n in range(nnz):
p = pool[n]
exp_values[n] = exp_values[n] / exp_sums[p]
return torch.sparse_coo_tensor(indices,
exp_values,
sparse.size(),
dtype=dtype, device=device)
elif dim < sparse.sparse_dim() + sparse.dense_dim():
return torch.sparse_coo_tensor(indices,
F.softmax(values, dim - sparse.sparse_dim() + 1),
sparse.size(),
dtype=dtype, device=device)
else:
raise ValueError(
'`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`'
% (dim, sparse.sparse_dim(), sparse.dense_dim()))
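# Pool-index example on hypothetical data: for size (2, 3), sparse indices
# [[0, 0, 1], [0, 2, 2]] and dim=0, strides end up [[0], [1]], so the raw
# pool key is the column index [0, 2, 2]; the i2p pass relabels this to
# contiguous ids [0, 1, 1]. Entries with the same id (the two column-2
# entries here) are max-shifted and normalized together, i.e. the softmax
# runs down each column.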
def softmax_jacobian_analytic(x, dim):
"""Return Jacobian of softmax using analytic formula
D_jS_i = S_i * (1[i==j] - S_j).
where S = softmax(x, dim), x is a dense tensor, and i, j
are in range(x.shape[dim]).
"""
y = F.softmax(x, dim)
y[y != y] = 0 # replace nan-s with zeros
J = torch.zeros((x.shape[dim],) + tuple(x.shape), dtype=x.dtype, device=x.device)
si = [slice(None)] * len(y.shape)
sj = [slice(None)] * len(y.shape)
s = [slice(None)] * len(J.shape)
for i in range(y.shape[dim]):
si[dim] = i
s[dim + 1] = i
yi = y[tuple(si)]
for j in range(y.shape[dim]):
sj[dim] = j
s[0] = j
if i == j:
J[tuple(s)] = yi * (1 - yi)
else:
yj = y[tuple(sj)]
J[tuple(s)] = - yi * yj
sj[dim] = slice(None)
si[dim] = slice(None)
s[dim + 1] = slice(None)
return J
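# Sanity check of the formula on a hypothetical 2-vector: if S = [s, 1 - s],
# the Jacobian is [[s*(1-s), -s*(1-s)], [-s*(1-s), s*(1-s)]], symmetric with
# zero row sums, exactly as D_jS_i = S_i * (1[i==j] - S_j) requires.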
def softmax_jacobian_autograd(x, dim, log=False):
"""Return Jacobian of softmax using PyTorch autograd feature.
x can be dense or sparse tensor.
"""
import itertools
if x.is_sparse:
x = x.coalesce()
dtype = x.dtype
device = x.device
shape = tuple(x.shape)
J = torch.zeros((shape[dim],) + shape, dtype=dtype, device=device)
for i in range(shape[dim]):
if x.is_sparse:
sparse_dim = x.sparse_dim()
dense_dim = x.dense_dim()
if dim < sparse_dim:
ranges = []
for j, sz in enumerate(shape[:sparse_dim]):
if dim == j:
ranges.append([i])
else:
ranges.append(list(range(sz)))
indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()
values = torch.ones((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)
else:
ranges = []
for j, sz in enumerate(shape[:sparse_dim]):
ranges.append(list(range(sz)))
indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()
values = torch.zeros((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)
sv = [slice(None)] * (dense_dim + 1)
sv[dim - sparse_dim + 1] = i
values[tuple(sv)] = 1
v = torch.sparse_coo_tensor(indices, values, shape, dtype=dtype, device=device)
else:
v = torch.zeros_like(x)
sv = [slice(None)] * len(v.shape)
sv[dim] = i
v[tuple(sv)] = 1
x_ = x.clone()
x_.requires_grad_(True)
if log:
if x_.is_sparse:
y = torch.sparse.log_softmax(x_, dim)
else:
y = F.log_softmax(x_, dim)
else:
if x_.is_sparse:
y = torch.sparse.softmax(x_, dim)
else:
y = F.softmax(x_, dim)
# replace nan-s with zeros
y.data[y != y] = 0
y.backward(v)
g = x_.grad
if not g.is_sparse:
# replace nan-s with zeros
g.data[g != g] = 0
J[i] = g.to_dense() if g.is_sparse else g
return J
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1166")
def test_op(sparse_dims, nnz, with_size, coalesced):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
def sparse_log(x):
return torch.sparse_coo_tensor(x._indices(), x._values().log(),
x.size(), dtype=x.dtype, device=x.device)
for dim in range(x.sparse_dim() + x.dense_dim()):
# Check sparse softmax definition
# check Python sparse softmax
y = sparse_softmax(x, dim)
r1 = softmax_to_dense(x, dim)
r2 = y.to_dense()
self.assertEqual(r1, r2)
# check C++ sparse softmax
y1 = torch.sparse.softmax(x, dim)
self.assertEqual(y, y1)
# check C++ sparse log_softmax
ly1 = torch.sparse.log_softmax(x, dim)
self.assertEqual(ly1, sparse_log(y1))
# Check autograd support on sparse softmax
# check softmax Jacobian definition for dense input
x1 = to_dense(x, fill_value=float('-inf'))
J = softmax_jacobian_analytic(x1, dim)
assert J.shape[0] == x.shape[dim]
assert J.shape[dim + 1] == x.shape[dim]
# check softmax Jacobian from autograd, dense input
J2 = softmax_jacobian_autograd(x1, dim)
self.assertEqual(J, J2)
# check softmax Jacobian from autograd, sparse input
J3 = softmax_jacobian_autograd(x, dim)
self.assertEqual(J, J3)
'''
Let y = softmax(x, dim) and z = log(y) = log_softmax(x, dim).
With Dy/Dx = J, the chain rule gives
Dz/Dx = Dz/Dy * Dy/Dx = (1/y) * J
=> J = J_log * y
'''
# log_softmax Jacobian from autograd, dense input
J2_log = softmax_jacobian_autograd(x1, dim, log=True)
# log_softmax Jacobian from autograd, sparse input
J3_log = softmax_jacobian_autograd(x, dim, log=True)
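# r1 broadcasts against the trailing `shape` axes of the Jacobians, so after
# the transposes below the relation J = J_log * y can be verified elementwise
# as J == J2_log * r1 (and J == J3_log * r1).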
J = J.transpose(0, dim + 1)
J2_log = J2_log.transpose(0, dim + 1)
J3_log = J3_log.transpose(0, dim + 1)
self.assertEqual(J, J2_log * r1)
self.assertEqual(J, J3_log * r1)
if dim == 0:
# check dtype argument
other_dtype = torch.float32
y2 = torch.sparse.softmax(x, dim, dtype=other_dtype)
self.assertEqual(y2.dtype, other_dtype)
self.assertEqual(y2, y1.type(other_dtype))
ly2 = torch.sparse.log_softmax(x, dim, dtype=other_dtype)
self.assertEqual(ly2.dtype, other_dtype)
self.assertEqual(ly2, ly1.type(other_dtype))
test_op(1, 10, [3], coalesced)
test_op(1, 10, [2, 3], coalesced)
test_op(1, 10, [3, 2], coalesced)
test_op(2, 10, [2, 3, 4], coalesced)
test_op(2, 10, [3, 4], coalesced)
test_op(2, 5, [5, 4], coalesced)
test_op(2, 10, [3, 4, 2], coalesced)
test_op(3, 10, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2, 3], coalesced)
test_op(3, 100, [3, 4, 2, 3, 5, 2], coalesced)
test_op(4, 100, [3, 4, 2, 3, 5, 2], coalesced)
@dtypes(torch.double)
def test_softmax_zero_nnz(self, device, dtype):
t = torch.sparse_coo_tensor([[]], [], (3,), device=device, dtype=dtype)
out = torch.sparse.softmax(t, 0)
self.assertEqual(out.to_dense(), torch.zeros_like(t))
# TODO: Investigate why ROCm's cusparseXcsrgemm2Nnz function doesn't return the same nnz value as CUDA does
@skipIfRocm
@coalescedonoff
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_types_and(*[torch.half] if SM53OrLater else [],
*[torch.bfloat16] if SM80OrLater else [],
torch.complex64,
*[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
@unittest.skipIf(TEST_WITH_CROSSREF, "not working with fake tensor")
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2, torch.complex64: 1e-2, torch.float32: 1e-2})
def test_sparse_matmul(self, device, dtype, coalesced):
"""
This function tests `torch.sparse.mm` when both mat1 and mat2 are sparse tensors.
"""
def ref_sparse_mm(a, b):
return a.to_dense() @ b.to_dense()
def grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b):
def test_grad_dense(a_s, b_s, g_s):
a = a_s.to_dense().detach()
b = b_s.to_dense().detach()
g = g_s.to_dense().detach()
a.requires_grad_(True)
b.requires_grad_(True)
c = a @ b
c.backward(g)
return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())
a, _, _ = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, _, _ = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
a.requires_grad_(True)
b.requires_grad_(True)
c = torch.sparse.mm(a, b)
c2 = c.to_dense().detach()
c2 = torch.rand_like(c2)
g = c2.sparse_mask(c.coalesce())
c.backward(g)
a_grad, b_grad = test_grad_dense(a, b, g)
# We convert grad to dense since dense and sparse mm
# implementations handle materialized zeroes differently.
self.assertEqual(a.grad.to_dense(), a_grad.to_dense())
self.assertEqual(b.grad.to_dense(), b_grad.to_dense())
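# Note (illustrative): the gradients of torch.sparse.mm are sparse with the
# sparsity pattern of the corresponding input, which is why the dense-path
# gradients above are restricted via sparse_mask before the comparison.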
def test_sparse_matmul(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
# dense implementation
r1 = ref_sparse_mm(a, b)
# cpp implementation
r2 = torch.sparse.mm(a, b)
self.assertEqual(r1, r2.to_dense())
# Check result is truly coalesced
self.assertTrue(r2.is_coalesced() and is_coalesced_indices(r2))
if dtype in [torch.double, torch.cdouble]:
a.requires_grad_(True)
b.requires_grad_(True)
# check autograd support on sparse matmul
def fn(D1, D2):
return torch.sparse.mm(D1, D2).to_dense()
if a.is_cuda:
# For CUDA, `nondet_tol` is set to `1e-5` because cuSparse sometimes
# returns approximately-zero values like `~1e-323`; this happens when
# chaining `torch.sparse.mm` operations.
# TODO: Check this cuSparse issue.
gradcheck(fn, (a, b), check_sparse_nnz=True, nondet_tol=1e-5)
else:
gradcheck(fn, (a, b), check_sparse_nnz=True)
grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b)
def test_error_cases():
def fn(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
r2 = torch.sparse.mm(a, b)
# The inputs are not matrices
self.assertRaises(RuntimeError, lambda: fn(3, 4, [2, 2, 2], [2, 2, 2]))
# Shapes do not match for matrix multiplication
self.assertRaisesRegex(RuntimeError,
r"mat1 and mat2 shapes cannot be multiplied \(2x3 and 4x2\)",
lambda: fn(2, 10, [2, 3], [4, 2]))
def different_dtypes():
a, i_a, v_a = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)
r2 = torch.sparse.mm(a.to(torch.float64), a.to(torch.float32))
self.assertRaisesRegex(RuntimeError, 'mat1 dtype Double does not match mat2 dtype Float', different_dtypes)
for n in range(2, 5):
for m in range(2, 8):
for p in range(2, 8):
test_sparse_matmul(2, 10, [n, m], [m, p])
test_sparse_matmul(2, 0, [0, 0], [0, 0])
test_sparse_matmul(2, 0, [0, 10], [10, 0])
test_error_cases()
@coalescedonoff
@dtypes(torch.double)
def test_assign(self, device, dtype, coalesced):
def assign_to():
a, i_a, v_a = self._gen_sparse(2, 5, [2, 3], dtype, device, coalesced)
a[0] = 100
self.assertRaises(TypeError, assign_to)
@dtypes(torch.double, torch.cdouble)
def test_full_broadcast_to(self, device, dtype):
def can_broadcast(s0, s1):
s0 = tuple(reversed(s0))
s1 = tuple(reversed(s1))
for i in range(len(s0)):
if s0[i] != 1 and s0[i] != s1[i]:
return False
return True
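# Note: this helper assumes len(s0) <= len(s1); that holds below because
# `sizes` is ordered by nondecreasing length and itertools.combinations
# preserves that order.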
sizes = (
(), (1,), (2,), (1, 1), (3, 1), (3, 2), (4, 1, 1), (4, 3, 2)
)
for s0, s1 in itertools.combinations(sizes, r=2):
t = make_tensor(s0, dtype=dtype, device=device, low=-9, high=9)
for sparse_dims in range(1, len(s0) + 1):
s = t.to_sparse(sparse_dims)
if can_broadcast(s0, s1):
t_res = torch.broadcast_to(t, s1)
s_res = torch._sparse_broadcast_to(s, s1)
torch._validate_sparse_coo_tensor_args(s_res._indices(), s_res._values(), s_res.shape)
if s_res.is_coalesced():
# ensure that is_coalesced is estimated correctly
self.assertEqual(s_res, torch.sparse_coo_tensor(s_res._indices(), s_res._values(), s_res.shape).coalesce())
self.assertEqual(s_res.to_dense(), t_res)
else:
with self.assertRaisesRegex(RuntimeError,
r"The expanded size of the tensor \(\d\) "
r"must match the existing size \(\d\)"):
torch._sparse_broadcast_to(s, s1)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_broadcast_to(self, device, dtype, coalesced):
def test(sparse_dims, nnz, with_size, new_size):
x = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
y = self.safeToDense(x)
x1 = torch._sparse_broadcast_to(x, new_size)
y1 = y.broadcast_to(new_size)
self.assertEqual(self.safeToDense(x1), y1)
test(4, 6, [7, 3, 1, 3, 0], [7, 3, 4, 3, 0])
test(4, 6, [7, 3, 1, 3, 0], [2, 7, 3, 1, 3, 0])
test(4, 6, [7, 3, 1, 3, 1, 3], [7, 3, 1, 3, 2, 3])
test(4, 6, [7, 3, 1, 3, 2, 1], [7, 3, 1, 3, 2, 3])
def _test_mul_skips(self, device, dtype, coalesced):
skipTestIfUncoalesced = False
# This case always coalesces inputs, which could lead to loss of precision,
# hence for float16/bfloat16 it is exercised only with already-coalesced tensors.
if not coalesced and dtype in {torch.float16, torch.bfloat16}:
skipTestIfUncoalesced = True
# to_dense is problematic for boolean non-coalesced CUDA tensors
# see https://github.com/pytorch/pytorch/issues/81648
if not coalesced and dtype == torch.bool and torch.device(device).type == "cuda":
skipTestIfUncoalesced = True
if skipTestIfUncoalesced:
self.skipTest(f"Test with dtype={dtype}, device={device} runs only with coalesced inputs")
@coalescedonoff
# NOTE: addcmul_out is not implemented for bool and half.
@dtypes(*all_types_and_complex_and(torch.bfloat16))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_sparse_mul(self, device, dtype, coalesced):
self._test_mul_skips(device, dtype, coalesced)
shape = (2, 3, 4, 10)
nnz = 10
def check(self, x, y):
res_sparse = x * y
res_dense = x.to_dense() * y.to_dense()
self.assertEqual(res_sparse.to_dense(), res_dense)
def check_empty(sparse_shape, nnz, dense_shape, coalesce):
from itertools import product
for nnz_val, shape_suffix in product((nnz, 0), ((), (0,))):
empty_sparse_shape = sparse_shape + shape_suffix
empty_dense_shape = dense_shape + shape_suffix
x = self._gen_sparse(sparse_dim, nnz_val, empty_sparse_shape, dtype, device, coalesce)[0]
check(self, x, x)
# TODO: uncomment once backward is implemented for sparse tensors that broadcast in dense dims.
# def check_autograd(x, y):
# if dtype in {torch.double, torch.cdouble}:
# xa = x.detach().clone().requires_grad_(True)
# ya = y.detach().clone().requires_grad_(True)
# gradcheck(lambda a, b: (a * b).to_dense(), (xa, ya), check_sparse_nnz=True)
# gradcheck(lambda a, b: (a * b).to_dense(), (ya, xa), check_sparse_nnz=True)
for dim in range(len(shape) + 1):
sub_shape = shape[dim:]
sparse_dim = len(sub_shape) // 2
check_empty(sub_shape, nnz, shape, coalesced)
x = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
y = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
check(self, x, y)
# TODO: uncomment once supported
# check_autograd(x, y)
# check broadcasting in dense dims
for d in range(sparse_dim, len(sub_shape)):
new_shape = sub_shape[:d] + (1,) + sub_shape[d + 1:]
y = self._gen_sparse(sparse_dim, nnz, new_shape, dtype, device, coalesced)[0]
check(self, x, y)
# TODO: uncomment once supported
# check_autograd(x, y)
@coalescedonoff
@dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_dense_mul(self, device, dtype, coalesced):
self._test_mul_skips(device, dtype, coalesced)
shape = (2, 3, 4, 10)
nnz = 10
def check(self, s, d):
res = d * s
# check commutativity
self.assertEqual(res, s * d)
# check correctness
self.assertEqual(res.to_dense(), s.to_dense() * d)
# check in-placeness for dense
if d.dim() >= s.dim():
dc = d.clone()
self.assertEqual(d.mul_(s), dc.mul_(s.to_dense()))
# check in-placeness for sparse
if s.dim() >= d.dim():
# for sparse
sc = s.clone()
self.assertEqual(s.mul_(d).to_dense(), sc.to_dense().mul_(d))
for dim in range(len(shape) + 1):
sub_shape = shape[dim:]
sparse_dim = len(sub_shape) // 2
def check_empty(sparse_shape, nnz, dense_shape, coalesce):
from itertools import product
for nnz_val, shape_suffix in product((nnz, 0), ((), (0,))):
empty_sparse_shape = sparse_shape + shape_suffix
empty_dense_shape = dense_shape + shape_suffix
s = self._gen_sparse(sparse_dim, nnz_val, empty_sparse_shape, dtype, device, coalesce)[0]
d = make_tensor(empty_dense_shape, dtype=dtype, device=device)
check(self, s, d)
# check scalar multiplication
s = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
for scalar in (True, 1, 1.0):
res_sparse_right = s * scalar
res_sparse_left = scalar * s
res_dense = s.to_dense() * scalar
# check correctness and dtype
self.assertEqual(s.to(res_sparse_right.dtype), res_sparse_right)
self.assertEqual(res_sparse_right, res_sparse_left)
self.assertEqual(res_sparse_right.dtype, res_dense.dtype)
self.assertEqual(res_sparse_left.dtype, res_dense.dtype)
# check scalar as 0-dim sparse tensor
tscalar = torch.tensor(scalar, device=device)
sscalar = tscalar.to_sparse()
res_sparse_right = s * sscalar
res_sparse_left = sscalar * s
self.assertEqual(res_sparse_right, res_sparse_left)
self.assertEqual(s.to(res_sparse_right.dtype), res_sparse_right)
# check non-coalesced 0-dim scalar
# we skip torch.bool because for such tensors
# coalesce().to_dense() != to_dense()
if dtype == torch.bool:
return
for scalar_dtype in (int, float):
scalar = scalar_dtype(1)
idx = torch.tensor([], device=device).reshape(0, 2)
val = torch.tensor([scalar, scalar], device=device)
sscalar = torch.sparse_coo_tensor(idx, val, ())
res_dense = s.to_dense() * sscalar.to_dense()
self.assertEqual((s * sscalar).to_dense(), res_dense)
self.assertEqual((sscalar * s).to_dense(), res_dense)
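# Note: the 0-dim sparse scalar above holds two duplicate entries at the
# empty index, so sscalar.to_dense() evaluates to 2 * scalar.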
# Case 1: sparse broadcasts over dense
s = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
d = make_tensor(shape, dtype=dtype, device=device)
check(self, s, d)
check_empty(sub_shape, nnz, shape, coalesced)
# Case 2: dense broadcasts over sparse
s = self._gen_sparse(3, nnz, shape, dtype, device, coalesced)[0]
d = make_tensor(sub_shape, dtype=dtype, device=device)
check(self, s, d)
check_empty(shape, nnz, sub_shape, coalesced)
@unittest.skipIf(not TEST_NUMPY, "NumPy is not available")
@onlyCPU
@dtypes(*all_types_and_complex_and(torch.bool))
def test_sparse_spdiags(self, device, dtype):
make_diags = functools.partial(make_tensor, dtype=dtype, device=device)
make_offsets = functools.partial(torch.tensor, dtype=torch.long, device=device)
if TEST_SCIPY:
def reference(diags, offsets, shape):
return scipy.sparse.spdiags(diags, offsets, *shape).toarray()
else:
def reference(diags, offsets, shape):
result = torch.zeros(shape, dtype=dtype, device=device)
for i, off in enumerate(offsets):
res_view = result.diagonal(off)
data = diags[i]
if off > 0:
data = data[off:]
m = min(res_view.shape[0], data.shape[0])
res_view[:m] = data[:m]
return result
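# Worked example of the fallback reference (illustrative): with
# diags = [[1, 2, 3]], offsets = [1] and shape = (3, 3), data[1:] = [2, 3] is
# written to the first superdiagonal, producing
#   [[0, 2, 0],
#    [0, 0, 3],
#    [0, 0, 0]]
# which matches scipy.sparse.spdiags semantics for positive offsets.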
def check_valid(diags, offsets, shape, layout=None):
ref_out = reference(diags, offsets, shape)
out = torch.sparse.spdiags(diags, offsets, shape, layout=layout)
if layout is None:
ex_layout = torch.sparse_coo
else:
ex_layout = layout
out_dense = out.to_dense()
self.assertTrue(out.layout == ex_layout, f"Output layout {out.layout} expected {ex_layout}")
self.assertEqual(out_dense, ref_out, f"Result:\n{out_dense} does not match reference:\n{ref_out}")
def check_invalid(args, error):
with self.assertRaisesRegex(RuntimeError, error):
torch.sparse.spdiags(*args)
def valid_cases():
# some normal cases
yield (make_diags((1, 5)), make_offsets([0]), (5, 5))
yield (make_diags((3, 3)), make_offsets([-1, 0, 1]), (4, 4))
# noncontiguous diags
yield (make_diags((5, 4), noncontiguous=True), make_offsets([-1, 1, 0, 2, -2]), (5, 5))
# noncontiguous offsets
yield (make_diags((3, 4)), make_offsets([1, -1, 0, -2, 2])[::2], (5, 5))
# noncontiguous diags + offsets
yield (make_diags((3, 4), noncontiguous=True), make_offsets([1, -1, 0, -2, 2])[::2], (5, 5))
# correct dimensionality (2d diags, 2d output) and matching shapes, but the number of diagonals is zero
yield (make_diags((0, 3)), make_offsets([]), (3, 3))
# forward rotation of upper diagonals
yield (make_diags((3, 8)), make_offsets([1, 2, 3]), (4, 4))
# rotation exhausts the input space to read from
yield (make_diags((2, 3)), make_offsets([2, 1]), (3, 3))
# Simple cases repeated with special output format
yield (make_diags((1, 5)), make_offsets([0]), (5, 5), torch.sparse_csc)
yield (make_diags((3, 3)), make_offsets([-1, 0, 1]), (4, 4), torch.sparse_csr)
# vector diags
yield (make_diags((3, )), make_offsets([1]), (4, 4))
# Scalar offset
yield (make_diags((1, 3)), make_offsets(2), (4, 4))
# offsets out of range
yield (make_diags((1, 3)), make_offsets([3]), (3, 3))
yield (make_diags((1, 3)), make_offsets([-3]), (3, 3))
for case in valid_cases():
check_valid(*case)
def invalid_cases():
yield (make_diags((1, 3)), make_offsets([0]), (3, 2, 3)), "Output shape must be 2d"
yield (make_diags((2, 3)), make_offsets([[1, 2], [0, 3]]), (3, 3)), "Offsets must be scalar or vector"
yield (make_diags((3, 2, 3)), make_offsets([0, 1, 2]), (4, 4)), "Diagonals must be vector or matrix"
yield (make_diags((3, 3)), make_offsets([-1, 0]), (3, 3)),\
r"Number of diagonals \(\d\) does not match the number of offsets \(\d\)"
yield (make_diags((5,)), make_offsets([0, 1, 2, 3, 4]), (3, 3)),\
r"Number of diagonals \(\d\) does not match the number of offsets \(\d\)"
yield (make_diags((2, 2)), make_offsets([-1, 0]), (2, 3), torch.strided),\
r"Only output layouts \(\w+, \w+, \w+\) are supported, got \w+"
yield (make_diags((2, 5)), make_offsets([0, 0]), (5, 5)), "Offset tensor contains duplicate values"
yield (make_diags((1, 5)), make_offsets([0]).to(torch.int32), (5, 5)), r"Offset Tensor must have dtype Long but got \w+"
for case, error_regex in invalid_cases():
check_invalid(case, error_regex)
def test_small_nnz_coalesced(self):
# a COO tensor created with nnz == 0 is always coalesced
self.assertTrue(torch.sparse_coo_tensor([[], []], [], (2, 2)).is_coalesced())
# same for a coo tensor with only 1 nnz
self.assertTrue(torch.sparse_coo_tensor([[0], [0]], [1], (2, 2)).is_coalesced())
# with two or more nnz, is_coalesced is False, as coalescedness can't be verified without an expensive check
self.assertFalse(torch.sparse_coo_tensor([[0, 0], [0, 0]], [1, 2], (2, 2)).is_coalesced())
# even if there are no duplicates
self.assertFalse(torch.sparse_coo_tensor([[0, 1], [0, 1]], [1, 2], (2, 2)).is_coalesced())
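# For comparison (illustrative): an explicit .coalesce() performs the
# expensive de-duplication and sets the flag, e.g.
#   torch.sparse_coo_tensor([[0, 1], [0, 1]], [1, 2], (2, 2)).coalesce().is_coalesced()  # True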
@coalescedonoff
@dtypes(*all_types_and_complex_and(torch.bool))
def test_sum(self, device, dtype, coalesced):
def run_test(shape, nnz):
a = self._gen_sparse(2, nnz, shape, dtype, device, coalesced)[0]
self.assertEqual(a.sum(), a._values().sum())
if dtype.is_floating_point or dtype.is_complex:
a.requires_grad_(True)
a.sum().backward()
self.assertEqual(a.grad, torch.ones(shape, dtype=dtype, device=device))
for shape in [(10, 5), (10, 10)]:
run_test(shape, 0)
run_test(shape, max(shape))
run_test(shape, shape[0] * shape[1])
class TestSparseOneOff(TestCase):
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_cuda_from_cpu(self):
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!"):
torch.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4),
[3, 4, 4])
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!"):
torch.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4, 0),
[3, 4, 4, 0])
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!"):
torch.sparse.FloatTensor(torch.LongTensor(1, 0).cuda(),
torch.randn(0, 4, 4, 0),
[0, 4, 4, 0])
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_cuda_sparse_cpu_dense_add(self):
x = torch.zeros(3, 4, 4)
sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4).cuda(),
[3, 4, 4])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(3, 4, 4, 0)
sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4, 0).cuda(),
[3, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(0, 4, 4, 0)
sparse_y = torch.cuda.sparse.FloatTensor(torch.LongTensor(1, 0).cuda(),
torch.randn(0, 4, 4, 0).cuda(),
[0, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
|
def all_sparse_layouts(test_name='layout', include_strided=False):
return parametrize(test_name, [
subtest(torch.strided, name='Strided'),
subtest(torch.sparse_coo, name='SparseCOO'),
subtest(torch.sparse_csr, name='SparseCSR'),
subtest(torch.sparse_csc, name='SparseCSC'),
subtest(torch.sparse_bsr, name='SparseBSR'),
subtest(torch.sparse_bsc, name='SparseBSC'),
][(0 if include_strided else 1):])
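# Typical usage (a sketch): decorating a test with
#   @all_sparse_layouts('layout', include_strided=False)
#   def test_foo(self, layout): ...
# instantiates one variant of the test per sparse layout, named after the
# subtests above.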
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_legacy_warnings
|
def test_legacy_warnings(self):
def f1():
"torch.sparse.SparseTensor() is deprecated."\
" Please use torch.sparse_coo_tensor((0,), dtype=)"
x_ref = torch.sparse_coo_tensor((0,), dtype=torch.float64)
x = torch.sparse.DoubleTensor()
self.assertEqual(x, x_ref)
def f2():
"torch.sparse.SparseTensor(cdata=x._cdata) is deprecated."\
" Please use torch.sparse_coo_tensor(x._indices(), x._values(), x.shape)"
x_ref = torch.tensor([[1, 2], [3, 4]], dtype=torch.float64).to_sparse()
x = torch.sparse.DoubleTensor(cdata=x_ref._cdata)
y = torch.sparse_coo_tensor(x._indices(), x._values(), x.shape)
self.assertEqual(x, x_ref)
self.assertEqual(y, x_ref)
def f3():
"torch.sparse.SparseTensor(indices, values, *, device=) is deprecated."\
" Please use torch.sparse_coo_tensor(indices, values, dtype=, device=)"
x_ref = torch.sparse_coo_tensor([[0, 0, 1, 1], [0, 1, 0, 1]], [1, 2, 3, 4], dtype=torch.float64)
x = torch.sparse.DoubleTensor(torch.tensor([[0, 0, 1, 1], [0, 1, 0, 1]]),
torch.tensor([1, 2, 3, 4], dtype=torch.float64))
self.assertEqual(x, x_ref)
def f4():
"torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated."\
" Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=)"
x_ref = torch.sparse_coo_tensor([[0, 0, 1, 1], [0, 1, 0, 1]], [1, 2, 3, 4], (2, 3), dtype=torch.float64)
x = torch.sparse.DoubleTensor(torch.tensor([[0, 0, 1, 1], [0, 1, 0, 1]]),
torch.tensor([1, 2, 3, 4], dtype=torch.float64), (2, 3))
self.assertEqual(x, x_ref)
def f5():
"torch.sparse.SparseTensor(shape, *, device=) is deprecated."\
" Please use torch.sparse_coo_tensor(shape, dtype=, device=)"
x_ref = torch.sparse_coo_tensor((2, 3), dtype=torch.float64)
x = torch.sparse.DoubleTensor(2, 3)
self.assertEqual(x, x_ref)
for test_f in [f1, f2, f3, f4, f5]:
with self.assertWarns(UserWarning, msg=test_f.__doc__) as cm:
test_f()
test_f()
# Check warn-once:
self.assertEqual(len(cm.warnings), 1)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
class TestSparseLegacyAndDeprecation(TestCase):
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
f1
|
def f1():
"torch.sparse.SparseTensor() is deprecated."\
" Please use torch.sparse_coo_tensor((0,), dtype=)"
x_ref = torch.sparse_coo_tensor((0,), dtype=torch.float64)
x = torch.sparse.DoubleTensor()
self.assertEqual(x, x_ref)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
f2
|
def f2():
"torch.sparse.SparseTensor(cdata=x._cdata) is deprecated."\
" Please use torch.sparse_coo_tensor(x._indices(), x._values(), x.shape)"
x_ref = torch.tensor([[1, 2], [3, 4]], dtype=torch.float64).to_sparse()
x = torch.sparse.DoubleTensor(cdata=x_ref._cdata)
y = torch.sparse_coo_tensor(x._indices(), x._values(), x.shape)
self.assertEqual(x, x_ref)
self.assertEqual(y, x_ref)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
f3
|
def f3():
"torch.sparse.SparseTensor(indices, values, *, device=) is deprecated."\
" Please use torch.sparse_coo_tensor(indices, values, dtype=, device=)"
x_ref = torch.sparse_coo_tensor([[0, 0, 1, 1], [0, 1, 0, 1]], [1, 2, 3, 4], dtype=torch.float64)
x = torch.sparse.DoubleTensor(torch.tensor([[0, 0, 1, 1], [0, 1, 0, 1]]),
torch.tensor([1, 2, 3, 4], dtype=torch.float64))
self.assertEqual(x, x_ref)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
f4
|
def f4():
"torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated."\
" Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=)"
x_ref = torch.sparse_coo_tensor([[0, 0, 1, 1], [0, 1, 0, 1]], [1, 2, 3, 4], (2, 3), dtype=torch.float64)
x = torch.sparse.DoubleTensor(torch.tensor([[0, 0, 1, 1], [0, 1, 0, 1]]),
torch.tensor([1, 2, 3, 4], dtype=torch.float64), (2, 3))
self.assertEqual(x, x_ref)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_sparse.py
|
setUp
|
def setUp(self):
TestCase.setUp(self)
self.index_tensor = lambda *args, **kwargs: torch.tensor(*args, **kwargs, dtype=torch.int64)
def sparse_empty_factory(*args, **kwargs):
kwargs['layout'] = kwargs.get('layout', torch.sparse_coo)
return torch.empty(*args, **kwargs)
self.sparse_empty = sparse_empty_factory
def sparse_tensor_factory(*args, **kwargs):
return torch.sparse_coo_tensor(*args, **kwargs)
self.sparse_tensor = sparse_tensor_factory
self.legacy_sparse_tensor = torch.sparse.DoubleTensor
|
def setUp(self):
TestCase.setUp(self)
self.index_tensor = lambda *args, **kwargs: torch.tensor(*args, **kwargs, dtype=torch.int64)
def sparse_empty_factory(*args, **kwargs):
kwargs['layout'] = kwargs.get('layout', torch.sparse_coo)
return torch.empty(*args, **kwargs)
self.sparse_empty = sparse_empty_factory
def sparse_tensor_factory(*args, **kwargs):
return torch.sparse_coo_tensor(*args, **kwargs)
self.sparse_tensor = sparse_tensor_factory
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
class TestSparse(TestSparseBase):
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
class TestSparse(TestSparseBase):
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
sparse_tensor_factory
|
def sparse_tensor_factory(*args, **kwargs):
return torch.sparse_coo_tensor(*args, **kwargs)
self.sparse_tensor = sparse_tensor_factory
self.legacy_sparse_tensor = torch.sparse.DoubleTensor
|
def sparse_tensor_factory(*args, **kwargs):
return torch.sparse_coo_tensor(*args, **kwargs)
self.sparse_tensor = sparse_tensor_factory
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
assert_uncoalesced
|
def assert_uncoalesced(self, x):
"""
Test if a CPU tensor is uncoalesced. This is used to ensure
correctness of the uncoalesced tensor generation algorithm.
"""
assert not x.is_coalesced()
existing_indices = set()
for i in range(x._nnz()):
index = str(x._indices()[:, i])
if index in existing_indices:
return True
else:
existing_indices.add(index)
|
def assert_uncoalesced(self, x):
"""
Test if a CPU tensor is uncoalesced. This is used to ensure
correctness of the uncoalesced tensor generation algorithm.
"""
assert not x.is_coalesced()
existing_indices = set()
indices = x._indices()
for i in range(x._nnz()):
index = str(indices[:, i])
if index in existing_indices:
return True
else:
existing_indices.add(index)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
class TestSparse(TestSparseBase):
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
class TestSparse(TestSparseBase):
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
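The context diff also swaps the deprecated `distutils.version.LooseVersion` for `packaging.version` (distutils was deprecated by PEP 632), and the new HIP gate strips the build suffix after the dash before parsing. A small sketch of the comparison pattern, assuming only that the `packaging` distribution is installed; the version strings are hypothetical stand-ins for `torch.version.cuda` and `torch.version.hip`:

from packaging import version

cuda_str = "11.4"            # hypothetical torch.version.cuda
hip_str = "6.1.0-abc123"     # hypothetical torch.version.hip with build suffix
assert version.parse(cuda_str) > version.parse("11.2")
assert version.parse(hip_str.split("-")[0]) >= version.parse("6.0")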
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
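The duplicate-index check near the end of `test_shape` relies on coalescing collapsing repeated indices: the 16-entry index list above contains only 9 distinct values, so `_nnz()` drops to 9 after `coalesce()`, with duplicate values summed per index. A standalone sketch of the same arithmetic:

import torch

i = torch.tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]])
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=torch.float64)
x = torch.sparse_coo_tensor(i, v, (10, 2))
assert len(set(i[0].tolist())) == 9    # 9 distinct indices among 16 entries
assert x.coalesce()._nnz() == 9        # duplicates merged, values summed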
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_tensor
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
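The substantive change in this record is the autograd check: the removed `check_sparse_nnz=True` argument is replaced by calling `to_dense(masked_grad=gradcheck.masked)` plus a plain `gradcheck(fn, (x,))`. A standalone sketch of the same check through the public API, assuming a PyTorch version where `gradcheck` takes the `masked` keyword that superseded `check_sparse_nnz`:

import torch

i = torch.tensor([[0, 2]])
v = torch.tensor([1.0, 2.0], dtype=torch.float64)
x = torch.sparse_coo_tensor(i, v, (3,)).requires_grad_(True)
ok = torch.autograd.gradcheck(
    lambda t: t.to_dense(masked_grad=True),   # masked semantics, as in the diff
    (x,),
    masked=True,
    check_batched_grad=False,                 # mirrors the file's partial()
)
assert ok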
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
fn
|
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
|
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
|
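This `fn` record is the same gradcheck migration in isolation. Note that `gradcheck` here is the module-level `functools.partial` shown in the context, and `gradcheck.masked` is an attribute the test harness apparently attaches to that partial object; the exact mechanism is outside this excerpt, so the following is a hypothetical sketch of the pattern only (partial objects accept arbitrary attributes):

import functools

def gradcheck(fn, inputs, check_batched_grad=True, masked=False):
    # stand-in for torch's gradcheck; the real signature differs
    return True

gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradcheck.masked = True                     # harness-style toggle on the partial
assert gradcheck(lambda t: t, (0,), masked=gradcheck.masked)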
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
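One line repeated in every context block, `load_tests = load_tests`, re-binds the helper imported from common_utils at module scope: unittest's load_tests protocol only fires for a module-level name, and the self-assignment also keeps linters from flagging the import as unused. A minimal sketch of the protocol:

import unittest

def load_tests(loader, standard_tests, pattern):
    # unittest calls this hook when collecting tests from the module;
    # the imported common_utils helper presumably filters or shards here.
    return standard_tests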
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_tensor
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
fn
|
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
|
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_tensor
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_tensor
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x)
self.assertEqual(res, safe_dense_x)
# Only run autograd test for float64
if x.dtype != torch.float64:
return
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
test_tensor(res, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
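Besides the shared import churn, the updated `test_shape` drops the trailing block that exercised `self.legacy_sparse_tensor()`, the uninitialized legacy constructor. An equivalent empty-indices/values check through the public constructor would look like the following sketch (an assumption, not the test file's actual replacement):

import torch

x = torch.sparse_coo_tensor(
    torch.empty((1, 0), dtype=torch.int64),   # no stored indices
    torch.empty(0),                           # no stored values
    (3,),
)
assert x._indices().numel() == 0
assert x._values().numel() == 0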
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
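The context diff in the row above swaps distutils.version.LooseVersion for packaging.version. A minimal sketch of why, assuming only that the `packaging` package is installed: LooseVersion ships with distutils, which was deprecated in Python 3.10 and removed in 3.12, and plain lexicographic string comparison mis-orders multi-digit version components, which is exactly what version.parse avoids.

```python
# A minimal sketch, assuming the `packaging` package is installed.
# packaging.version.parse compares versions numerically, component by
# component, which lexicographic string comparison gets wrong.
from packaging import version

assert version.parse("11.10") > version.parse("11.2")  # numeric-aware: 10 > 2
assert "11.10" < "11.2"  # plain string comparison mis-orders the same pair
```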
torch
|
test/test_sparse.py
|
test_shape
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
|
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
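The test_shape row above asserts that coalesce() collapses 16 entries carrying duplicate indices down to the 9 distinct indices; its after column also drops the trailing legacy_sparse_tensor() block, which relied on the removed uninitialized-constructor path. Below is a standalone sketch of the coalesce assertion, using the public torch.sparse_coo_tensor constructor in place of the test class's self.sparse_tensor and self.index_tensor helpers.

```python
import torch

# Standalone sketch of the row's coalesce assertion: 16 entries over the
# 9 distinct indices {0, 1, 2, 3, 4, 6, 7, 8, 9} coalesce to nnz == 9,
# with the values at duplicate indices summed.
i = torch.tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]])
v = torch.tensor([[idx**2, idx] for idx in range(16)], dtype=torch.float64)
x = torch.sparse_coo_tensor(i, v, (10, 2))

assert x.coalesce()._nnz() == 9
# Coalescing sorts indices, so slot 0 holds index 0, whose three duplicate
# entries [1, 1], [4, 2], [9, 3] sum to [14, 6].
assert x.coalesce().values()[0].tolist() == [14.0, 6.0]
```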
torch
|
test/test_sparse.py
|
fn
|
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
|
def fn(x):
return x.to_dense(masked_grad=gradcheck.masked)
x.requires_grad_(True)
gradcheck(fn, (x,))
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
reduction_ops_with_sparse_support = [op for op in reduction_ops if 'masked.' not in op.name and
(op.supports_sparse
or op.supports_sparse_csr
or op.supports_sparse_csc
or op.supports_sparse_bsr
or op.supports_sparse_bsc)]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
|
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF, TEST_WITH_ROCM, skipIfTorchDynamo, \
parametrize, subtest, is_coalesced_indices, suppress_warnings, instantiate_parametrized_tests, \
skipIfCrossRef
from torch.testing._internal.common_cuda import TEST_CUDA
from numbers import Number
from typing import Dict, Any
from packaging import version
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, TEST_MULTIGPU)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes, onlyNativeDeviceTypes)
from torch.testing._internal.common_methods_invocations import \
(op_db, reduction_ops, sparse_unary_ufuncs, sparse_masked_reduction_ops, binary_ufuncs)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
from torch.testing._internal.opinfo.refs import (
ElementwiseBinaryPythonRefInfo,
ReductionPythonRefInfo
)
reduction_ops_with_sparse_support = [
op for op in reduction_ops if 'masked.' not in op.name and
_op_supports_any_sparse(op) and not isinstance(op, ReductionPythonRefInfo)]
binary_ufuncs_with_sparse_support = [
op for op in binary_ufuncs if _op_supports_any_sparse(op) and
not isinstance(op, ElementwiseBinaryPythonRefInfo)]
like_fns_with_sparse_support = [op for op in op_db if _op_supports_any_sparse(op) and '_like' in op.name]
import scipy.sparse
load_tests = load_tests
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and version.parse(torch.version.cuda) > version.parse("11.2")
) or (not IS_WINDOWS and not TEST_WITH_ROCM)
HIPSPARSE_SPMM_COMPLEX128_SUPPORTED = torch.version.hip and version.parse(torch.version.hip.split("-")[0]) >= version.parse("6.0")
from itertools import product, repeat
import pickle
import torch.nn.functional as F
import itertools
from itertools import product
from itertools import product
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
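The fn row above captures a gradcheck API migration: the check_sparse_nnz=True flag gives way to masked semantics expressed through to_dense(masked_grad=...). A rough sketch follows, under the assumption of PyTorch >= 2.1, where Tensor.to_dense accepts the masked_grad keyword and torch.autograd.gradcheck accepts a masked keyword; the row's gradcheck.masked attribute is set by PyTorch's internal test wrapper, so the flag is passed explicitly here.

```python
import torch
from torch.autograd import gradcheck

# Rough sketch, assuming PyTorch >= 2.1: masked semantics are requested via
# to_dense(masked_grad=...) and gradcheck(..., masked=True) instead of the
# removed check_sparse_nnz=True flag.
def fn(x):
    return x.to_dense(masked_grad=True)

i = torch.tensor([[0, 2]])
v = torch.tensor([1.0, 2.0], dtype=torch.float64)
x = torch.sparse_coo_tensor(i, v, (4,), requires_grad=True)

gradcheck(fn, (x,), masked=True, check_batched_grad=False)
```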