library | test_file | test_function | before | after | context_before | context_after | commit_before | commit_after | change_type
---|---|---|---|---|---|---|---|---|---
stringclasses (1 value) | stringclasses (785 values) | stringlengths (1-295) | stringlengths (0-448k) | stringlengths (0-487k) | stringclasses (947 values) | stringlengths (0-16.3k) | stringclasses (1 value) | stringclasses (1 value) | stringclasses (3 values)
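The rows below follow this schema. As a minimal sketch of how such a table might be consumed (assuming it is published via the Hugging Face `datasets` library; the repository id in the snippet is a placeholder, not the dataset's real name), the columns can be loaded and filtered like so:

```python
# Minimal sketch, not the dataset's documented API: load rows with this schema
# through the Hugging Face `datasets` library. The repository id is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/jit-fuser-test-changes", split="train")  # hypothetical repo id

# Keep only deleted test helpers from the CUDA fuser test file.
deleted = ds.filter(
    lambda row: row["change_type"] == "deleted"
    and row["test_file"] == "test/test_jit_cuda_fuser.py"
)

for row in deleted.select(range(min(3, len(deleted)))):
    print(row["test_function"], row["commit_before"][:8], "->", row["commit_after"][:8])
```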
torch
|
test/test_jit_cuda_fuser.py
|
_get_extremal_tensor
|
def _get_extremal_tensor(x, val, dtype):
if x.dtype != dtype:
return x
return torch.full_like(x, val)
|
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings
import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck
from jit.test_fuser_common import TestFuserCommon # noqa: F401
import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0
os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ.get('PYTORCH_NVFUSER_ENABLE', '')
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ.get('PYTORCH_NVFUSER_DISABLE', '')
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_jit_cuda_fuser.py
|
_get_extremal_input
|
def _get_extremal_input(x, val, dtype):
if isinstance(x, torch.Tensor):
return _get_extremal_tensor(x, val, dtype)
elif is_iterable_of_tensors(x):
return [_get_extremal_tensor(y, val, dtype) for y in x]
return x
|
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings
import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck
from jit.test_fuser_common import TestFuserCommon # noqa: F401
import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0
os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ.get('PYTORCH_NVFUSER_ENABLE', '')
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ.get('PYTORCH_NVFUSER_DISABLE', '')
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_jit_cuda_fuser.py
|
_get_extremal_sample
|
def _get_extremal_sample(sample: SampleInput, val, dtype):
extremal_sample = SampleInput(
input=_get_extremal_input(sample.input, val, dtype),
args=tuple(_get_extremal_input(x, val, dtype) for x in sample.args),
kwargs={k: _get_extremal_input(v, val, dtype) for k, v in sample.kwargs.items()},
)
return extremal_sample
|
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings
import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck
from jit.test_fuser_common import TestFuserCommon # noqa: F401
import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0
os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ.get('PYTORCH_NVFUSER_ENABLE', '')
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ.get('PYTORCH_NVFUSER_DISABLE', '')
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_jit_cuda_fuser.py
|
_get_extremal_samples
|
def _get_extremal_samples(sample: SampleInput, dtype):
vals = [float('inf'), float('-inf'), float('nan')]
if dtype.is_complex:
complex_vals = itertools.product(vals, vals)
vals = tuple(map(lambda x: complex(*x), complex_vals))
for val in vals:
yield _get_extremal_sample(sample, val, dtype)
variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)
for variant, sample in variant_sample_pairs:
trace = create_traced_fn(self, variant, cache_traced_fn=True)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
for extremal_sample in _get_extremal_samples(sample, dtype):
try:
with freeze_rng_state():
ref = variant(*clone_inputs((extremal_sample.input, *extremal_sample.args)),
**extremal_sample.kwargs)
except (torch._C._LinAlgError, RuntimeError, ValueError):
# if eager errors out, then don't expect NVFuser to pass
continue
with freeze_rng_state():
val = trace(*clone_inputs((extremal_sample.input, *extremal_sample.args)),
**extremal_sample.kwargs)
self.assertEqual(val, ref, equal_nan=True, exact_device=True)
# See [Note: Clearing CU after NVFuser tests]
torch.jit._state._python_cu.drop_all_functions()
|
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings
import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
is_iterable_of_tensors, freeze_rng_state, skipIfRocm
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck
from jit.test_fuser_common import TestFuserCommon # noqa: F401
import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0
os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition,' + os.environ.get('PYTORCH_NVFUSER_ENABLE', '')
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,' + os.environ.get('PYTORCH_NVFUSER_DISABLE', '')
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
ALIAS_TEST_DISABLED = True
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
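The four rows above together remove the extremal-value helpers from `test/test_jit_cuda_fuser.py`. As a rough, self-contained sketch of how those helpers composed (simplified here: plain argument tuples instead of the OpInfo `SampleInput` machinery, an inline tensor-list check instead of `is_iterable_of_tensors`, and `torch.minimum` as a stand-in op), the sweep over inf, -inf, and nan looked roughly like this:

```python
# Rough sketch of the deleted helpers above, not the original test code.
import itertools
import torch


def _get_extremal_tensor(x, val, dtype):
    # Only overwrite tensors of the dtype under test.
    if x.dtype != dtype:
        return x
    return torch.full_like(x, val)


def _get_extremal_input(x, val, dtype):
    # Tensors and lists of tensors are replaced; everything else passes through.
    if isinstance(x, torch.Tensor):
        return _get_extremal_tensor(x, val, dtype)
    if isinstance(x, (list, tuple)) and all(isinstance(y, torch.Tensor) for y in x):
        return [_get_extremal_tensor(y, val, dtype) for y in x]
    return x


def _get_extremal_samples(args, dtype):
    # Sweep inf, -inf and nan (all pairwise combinations for complex dtypes).
    vals = [float("inf"), float("-inf"), float("nan")]
    if dtype.is_complex:
        vals = [complex(re, im) for re, im in itertools.product(vals, vals)]
    for val in vals:
        yield tuple(_get_extremal_input(a, val, dtype) for a in args)


if __name__ == "__main__":
    def fn(a, b):
        return torch.minimum(a, b)

    args = (torch.randn(4), torch.randn(4))
    traced = torch.jit.trace(fn, args)
    for extremal_args in _get_extremal_samples(args, torch.float32):
        ref = fn(*extremal_args)        # eager reference
        out = traced(*extremal_args)    # traced path
        torch.testing.assert_close(out, ref, equal_nan=True)
```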
torch
|
test/test_jit_fuser.py
|
warmup_forward
|
def warmup_forward(f, *args):
profiling_count = 2
for i in range(profiling_count):
results = f(*args)
return results
@skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "skip due to SIGIOT failures, #67646")
class TestFuser(JitTestCase):
def assertAllFused(self, graph, except_for=()):
diff_graphs = [n for n in graph.nodes() if n.kind() == 'prim::DifferentiableGraph']
if len(diff_graphs) > 0:
self.assertEqual(len(diff_graphs), 1)
graph = diff_graphs[0].g('Subgraph')
allowed_nodes = {'prim::Constant', 'prim::FusionGroup', 'prim::BailoutTemplate',
'prim::BailOut', 'prim::TupleConstruct'} | set(except_for)
self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()),
'got {}'.format(graph))
self.assertTrue([node.kind() for node in graph.nodes()].count('prim::FusionGroup') == 1)
def _test_fused_abs(self, device='cpu'):
def func(x):
return x.abs() * 2
a = torch.randn(5, device=device)
scripted = self.checkScript(func, (a,))
self.assertAllFused(scripted.graph_for(a))
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_abs_cpu(self):
self._test_fused_abs()
@unittest.skipIf(not IS_WINDOWS, "This is meant to be Windows-specific")
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_abs_cpu_unicode_temp_dir(self):
with TemporaryDirectoryName(suffix='中文') as dname:
shell_env = os.environ.copy()
shell_env['TMP'] = dname
cmd = [sys.executable, os.path.basename(__file__), type(self).__name__ + '.test_abs_cpu']
legacy_jit_flag = '--jit-executor=legacy'
for v in sys.argv:
if v == legacy_jit_flag:
cmd.append(legacy_jit_flag)
return_code = shell(cmd, cwd=os.path.dirname(__file__), env=shell_env)
self.assertEqual(return_code, 0)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_abs_cuda(self):
self._test_fused_abs(device="cuda")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_zero_element_tensors(self):
def decode(sin_t, cos_t):
theta = torch.atan2(sin_t.float(), cos_t.float())
return theta
sin = torch.zeros(0, device="cuda")
cos = torch.zeros(0, device="cuda")
inputs = [sin, cos]
ge = self.checkScript(decode, inputs)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_arg_configurations_smoke_cuda(self):
# A smoke test to make sure we won't use the same kernel for contiguous
# and non-contiguous arguments.
# TODO: add optionally enabled debug counters to the fuser to verify
# that we really can tell the difference between configurations
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
traced_f = torch.jit.trace(f, (x, y,))
self.assertEqual(traced_f(x.t().contiguous(), y), traced_f(x.t(), y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_broadcast_cuda(self):
def scaleshift(x, scale, shift):
return x * scale + shift
inputs = [
torch.randn(4, 4, dtype=torch.float, device='cuda'),
torch.randn(4, dtype=torch.float, device='cuda'),
torch.randn(4, dtype=torch.float, device='cuda'),
]
ge = self.checkTrace(scaleshift, inputs)
self.assertAllFused(ge.graph_for(*inputs))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no bfloat support with profiling on")
def test_cuda_bfloat16(self):
def foo(x, y):
return (x + y).relu()
m = torch.jit.script(foo)
x = torch.randn(65536).cuda().bfloat16()
y = torch.randn_like(x)
self.assertAllFused(m.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_HALF, "no half support")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
def test_cuda_half(self):
x = torch.randn(4, 4, dtype=torch.half, device='cuda')
y = torch.randn(4, 4, dtype=torch.half, device='cuda')
funcs = [
self.fn_test_comparison_gt_lt,
self.fn_test_relu,
self.fn_test_exp
]
# Note: Non fused inputs must be float to prevent loss of precision
inputs = (x.float(), y.float())
fusion_inputs = (x, y)
for fn in funcs:
local_inputs = [t.clone().requires_grad_() for t in inputs]
local_fusion_inputs = [t.clone().requires_grad_() for t in fusion_inputs]
# Verifies outputs
fusion = torch.jit.trace(fn, local_fusion_inputs, check_trace=False)
outputs = fn(*local_inputs)
fusion_outputs = fusion(*local_fusion_inputs)
outputs_half = [t.half() for t in outputs]
self.assertEqual(outputs_half, fusion_outputs)
# Verifies gradients
for output, fusion_output in zip(outputs_half, fusion_outputs):
grads = torch.autograd.grad(
output.float().sum(), local_inputs, allow_unused=True, retain_graph=True)
fusion_grads = torch.autograd.grad(
fusion_output.sum(), local_fusion_inputs, allow_unused=True, retain_graph=True)
grads_half = [t.half() for t in grads]
self.assertEqual(grads_half, fusion_grads)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_checks_cat_inputs(self):
# We shouldn't treat cat nodes as broadcasting. All their inputs
# need to be checked for having the same map size, before we can
# run the kernel.
def f(x, y):
return torch.cat([x + 2 * x + x ** 2, y + 4 * y + y ** 3], dim=0)
# NOTE: y is broadcastable to x, but output of f(x, y) should have
# shape 3x4, and not 4x4.
x = torch.randn(2, 4, dtype=torch.float, device='cuda')
y = torch.randn(1, 4, dtype=torch.float, device='cuda')
scripted = self.checkScript(f, (x, y))
self.assertAllFused(scripted.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_remainder_cuda(self):
def cuda_rem(x, y):
return 1 + torch.remainder(x, y) - 1
a = torch.rand([512], dtype=torch.float).cuda()
b = torch.rand([512], dtype=torch.float).cuda()
inputs = [a, b]
ge = self.checkScript(cuda_rem, inputs)
graph = ge.graph_for(*inputs)
self.assertAllFused(graph)
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_chunk_cuda(self):
def fn(x):
a, b, c = x.chunk(3, 1)
return a * b + c
inputs = [torch.randn(10, 6, dtype=torch.float, device='cuda')]
ge = self.checkScript(fn, inputs)
graph = ge.graph_for(*inputs)
self.assertAllFused(graph)
FileCheck().check("prim::ConstantChunk[chunks=3, dim=1]").run(str(graph))
@staticmethod
def _test_chunk_correctness(self, device='cpu'):
def chunk_4_0(x):
x0, x1, x2, x3 = x.chunk(4, 0)
return x0 + x1 + x2 + x3
def chunk_4_1(x):
x0, x1, x2, x3 = x.chunk(4, 1)
return x0 + x1 + x2 + x3
def chunk_4_last(x):
x0, x1, x2, x3 = x.chunk(4, 2)
return x0 + x1 + x2 + x3
fns = [chunk_4_0, chunk_4_1, chunk_4_last]
tensors = [
# splitSize = 1
torch.randn(4, 4, 4, dtype=torch.float, device=device),
# contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device),
# non-contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device).transpose(1, 2),
]
for tensor in tensors:
for fn in fns:
self.checkScript(fn, [tensor])
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_chunk_correctness(self):
return self._test_chunk_correctness(self, 'cpu')
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_chunk_correctness_cuda(self):
return self._test_chunk_correctness(self, 'cuda')
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_chunk_distributes_cuda(self):
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(f, (x, y))
graph = ge.graph_for(x, y)
FileCheck().check("broadcast_tensors").check('with prim::FusionGroup_') \
.check_count('ConstantChunk', 2, exactly=True).run(str(graph))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_chunk_motion_deduplicates_inputs(self):
def func1(x):
z = x * x
z0, z1 = z.chunk(2)
return z0 * z1
def func2(x):
z = x * x * x
z0, z1 = z.chunk(2)
return z0 * z1
inputs = [
torch.tensor([1.1, 1.2], device='cuda', dtype=torch.float),
]
for func in [func1, func2]:
module = self.checkScript(func, inputs)
forward_graph = module.graph_for(*inputs)
self.assertGraphContainsExactly(forward_graph, 'prim::FusionGroup', 1)
fusion_group = list(forward_graph.nodes())[-1]
self.assertEqual(len(list(fusion_group.inputs())), 1)
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_chunk_multiple_cuda(self):
# The arguments are intentionally used out of order as a test to see
# if the fusion compiler adds extra args in the correct order
def fn(s, x, y, z):
z1, z2 = z.chunk(2, 2)
x1, x2, x3 = x.chunk(3, 1)
y1, y2 = y.chunk(2, 0)
return s + x1 + x2 + x3 + y1 + y2 + z1 + z2
inputs = [
torch.randn(5, 2, 3, dtype=torch.float, device='cuda'),
torch.randn(5, 6, 3, dtype=torch.float, device='cuda'),
torch.randn(10, 2, 3, dtype=torch.float, device='cuda'),
torch.randn(5, 2, 6, dtype=torch.float, device='cuda'),
]
ge = self.checkScript(fn, inputs)
self.assertAllFused(ge.graph_for(*inputs))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_minmax(self):
def tmax(a, b):
return torch.max(2 * a, b)
def tmin(a, b):
return torch.min(2 * a, b)
a = torch.randn(4, 4, dtype=torch.float, device="cuda")
b = torch.randn(4, 4, dtype=torch.float, device="cuda")
nan = torch.tensor(float('nan'), dtype=torch.float, device="cuda")
for f, inputs in product(
(tmax, tmin),
([a, b], [a, nan], [b, nan])):
s = self.checkScript(f, inputs)
self.assertAllFused(s.graph_for(*inputs))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_clamp(self):
def func2(a, b):
return torch.clamp(a + b, min=0, max=2)
def funcInf(a, b):
return torch.clamp(a + b, min=0, max=float('inf'))
def funcOptMin(a, b):
return torch.clamp(a + b, max=2)
def funcOptMax(a, b):
return torch.clamp(a + b, min=0)
a = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
b = torch.randn(4, 4, dtype=torch.float, device='cuda')
nan = torch.tensor(float('nan'), dtype=torch.float, device='cuda')
funcs = (func2, funcInf, funcOptMin, funcOptMax)
for f, inputs in product(funcs, [[a, b], [a, nan]]):
f.__disable_jit_function_caching__ = True
inp1, inp2 = inputs
s = self.checkScript(f, (inp1, inp2), profiling=ProfilingMode.PROFILING)
self.assertAllFused(s.graph_for(inp1, inp2), except_for={'aten::size', 'aten::_size_if_not_equal'})
c = s(inp1, inp2)
with enable_profiling_mode_for_profiling_tests():
warmup_backward(c.sum())
graph = backward_graph(s)
self.assertAllFused(graph, except_for={'aten::Float', 'aten::_grad_sum_to_size'})
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
def test_dropout(self):
def func(x):
x = torch.nn.functional.dropout(x)
return torch.nn.functional.relu(x)
a = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
s = torch.jit.script(func)
c = s(a)
c = s(a)
warmup_backward(c.sum())
# skip_check to skip extra bailout nodes in between
graph = backward_graph(s, skip_check=True)
self.assertAllFused(graph, except_for={'aten::div', 'prim::Constant'})
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_comparison_eq_ne(self):
def f(x, y):
mask = (x == 0).type_as(x)
z = x * mask + y
mask = (x != 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@staticmethod
def fn_test_comparison_gt_lt(x, y):
mask = (x > 0).type_as(x)
z = x * mask + y
mask = (x < 0).type_as(x)
z = z * mask + y
return z
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_comparison_gt_lt_cuda(self):
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(self.fn_test_comparison_gt_lt, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_comparison_ge_le_cuda(self):
def f(x, y):
mask = (x >= 0).type_as(x)
z = x * mask + y
mask = (x <= 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
x.requires_grad_(True)
y.requires_grad_(True)
self.assertAllFused(ge.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_addcmul_cuda(self):
t = torch.randn(1, 4, dtype=torch.float, device='cuda')
t1 = torch.randn(4, 1, dtype=torch.float, device='cuda')
t2 = torch.randn(1, 4, dtype=torch.float, device='cuda')
def foo(t, t1, t2):
return t.addcmul(t + 1, t2, value=0.1)
ge = self.checkTrace(foo, (t, t1, t2), allow_unused=True)
graph = ge.graph_for(t, t1, t2)
self.assertAllFused(graph)
# TODO: We leak CUDA memory here because the traced graph holds onto a
# constant-ified tensor. Since the Python-global CompilationUnit is alive
# until the end of the process, the memory is effectively leaked.
# Removed `_cuda` suffix from this test which disables leak-checking.
# If this is a real problem, we'll need to revisit Torchscript Function
# lifetimes in Python.
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_lerp(self):
start = torch.randn(4, 1, dtype=torch.float, device='cuda')
end = torch.randn(1, 4, dtype=torch.float, device='cuda')
weight = torch.tensor(0.5, dtype=torch.float, device='cuda')
# scalar weight overload
def foo_weight_scalar(start, end):
return torch.lerp(start + 1, end, 0.5)
# tensor weight overload
def foo_weight_tensor(start, end):
return torch.lerp(start + 1, end, weight)
ge_weight_scalar = self.checkTrace(foo_weight_scalar, (start, end))
graph = ge_weight_scalar.graph_for(start, end)
self.assertAllFused(graph)
ge_weight_tensor = self.checkTrace(foo_weight_tensor, (start, end))
graph = ge_weight_tensor.graph_for(start, end)
self.assertAllFused(graph)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_concat_cuda(self):
hx = torch.randn(3, 20, dtype=torch.float, device='cuda')
cx = torch.randn(3, 20, dtype=torch.float, device='cuda')
def foo(hx, cx):
return torch.cat((hx + cx, hx * cx))
ge = self.checkTrace(foo, (hx, cx))
graph = ge.graph_for(hx, cx)
self.assertAllFused(graph)
FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_concat_invariant_cuda(self):
# Invariant: the output of prim::FusedConcat may
# not be an input to any node inside the FusionGroup.
def fn(x, y, z):
x1 = x + y
y1 = x - y
w = torch.cat([x1, y1])
return w + z
x = torch.randn(2, 2, dtype=torch.float, device='cuda')
y = torch.randn(2, 2, dtype=torch.float, device='cuda')
z = torch.randn(4, 2, dtype=torch.float, device='cuda')
ge = self.checkTrace(fn, (x, y, z))
graph = ge.graph_for(x, y, z)
self.assertAllFused(graph, except_for={'aten::add'})
FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@staticmethod
def fn_test_exp(x, y):
return (x + .5 * y).exp()
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_exp_cuda(self):
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(self.fn_test_exp, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "broken with profiling on")
@torch._jit_internal._disable_emit_hooks_decorator
@_inline_everything
def test_fuse_decompose_normalization(self):
class ResLike(torch.jit.ScriptModule):
def __init__(self, norm_module):
super().__init__()
self.nm = norm_module
@torch.jit.script_method
def forward(self, x, y):
return y + torch.relu(self.nm(x))
def test_norm_decompose(nm, in_opt_graph, not_in_opt_graph, in_fusegraph):
model = ResLike(nm).cuda()
model_noopt = ResLike(nm).cuda()
model_noopt.load_state_dict(model.state_dict())
x = torch.randn(2, 16, 8, 8, device='cuda')
y = torch.randn(2, 16, 8, 8, device='cuda')
# FIXME: We need differentiation for CNNs for this optimization to trigger
with torch.no_grad():
out = model(x, y)
graph = model.graph_for(x, y)
rep = str(graph)
with torch.jit.optimized_execution(False):
out_noopt = model_noopt(x, y)
rep_noopt = str(model_noopt.graph_for(x, y))
self.assertEqual(out, out_noopt, atol=3e-5)
# Check that normalization op has really been decomposed
for node_in_graph in in_opt_graph:
self.assertIn(node_in_graph, rep)
for node_not_in_graph in not_in_opt_graph:
self.assertNotIn(node_not_in_graph, rep)
self.assertIn(node_not_in_graph, rep_noopt)
fusion_groups = [node for node in graph.nodes() if node.kind() == 'prim::FusionGroup']
self.assertEqual(len(fusion_groups), 1)
fused_graph = str(fusion_groups[0].g('Subgraph'))
for node_in_fusegraph in in_fusegraph:
self.assertIn(node_in_fusegraph, fused_graph)
# test for batchnorm decompose
bm = nn.BatchNorm2d(16)
test_norm_decompose(bm, ['aten::batch_norm_update_stats'],
['aten::batch_norm('], ['aten::sqrt'])
# test for layernorm decompose
lm = nn.LayerNorm(8)
test_norm_decompose(lm, ['aten::batch_norm_stats'],
['aten::layer_norm('], ['aten::sub', 'aten::mul', 'aten::add'])
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_threshold(self):
def f(x):
return torch.threshold(x, 0, -10) + x + x + x
x = torch.tensor([-1, -0.5, 0, 1, 2, 3], device='cuda')
scripted = self.checkScript(f, (x,))
self.assertAllFused(scripted.graph_for(x))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_scalar_arg_cuda(self):
def fn_test_scalar_arg(x: torch.Tensor, p: float) -> torch.Tensor:
return p * (x * x + x)
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
p = 3
scripted = self.checkScript(fn_test_scalar_arg, (x, p))
self.assertAllFused(scripted.graph_for(x, p))
x.requires_grad_(True)
# use another function otherwise we will bailout
# and won't be able to do fused checks
def fn_test_scalar_arg_requires_grad(x: torch.Tensor, p: float) -> torch.Tensor:
return p * (x * x + x)
scripted = torch.jit.script(fn_test_scalar_arg_requires_grad)
out = scripted(x, p)
self.assertAllFused(scripted.graph_for(x, p), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@unittest.skip("deduplicating introduces aliasing in backward graph's outputs")
@enable_cpu_fuser
def test_fuser_deduplication(self):
# See that fusion kernel outputs are deduplicated when removing _grad_sum_to_size in the fuser's compilation
# see the discussion in PR #14957.
def f(x, y):
return torch.sigmoid(x + y)
b = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
s = self.checkScript(f, (a, b))
self.assertAllFused(s.graph_for(a, b), except_for={
'aten::size', 'aten::_size_if_not_equal', 'prim::BroadcastSizes'})
c = s(a, b)
results = warmup_backward(c.sum(), [a, b])
ga2, gb2 = results.pop()
graph = backward_graph(s)
self.assertAllFused(graph)
# check that a, b share storage, i.e. were generated as a single output in the fuser
self.assertEqual(ga2.data_ptr(), gb2.data_ptr())
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
@unittest.skip("temporarily disabled because fusion was restricted in fixing #22833")
def test_fuser_iou(self):
# This checks if most of Intersection over Union is fused.
# In particular, the backward contains many _grad_sum_to_size.
def iou(b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2):
ltx = torch.max(b1x1, b2x1) # [N,M]
lty = torch.max(b1y1, b2y1)
rbx = torch.min(b1x2, b2x2)
rby = torch.min(b1y2, b2y2)
w = (rbx - ltx).clamp(min=0, max=float('inf')) # [N,M]
h = (rby - lty).clamp(min=0, max=float('inf')) # [N,M]
inter = w * h # [N,M]
area1 = (b1x2 - b1x1) * (b1y2 - b1y1)  # [N,1]
area2 = (b2x2 - b2x1) * (b2y2 - b2y1)  # [1,M]
iou = inter / (area1 + area2 - inter)
return iou
box1 = torch.randn(5, 4, requires_grad=True)
box2 = torch.randn(5, 4, requires_grad=True)
# unsqueezing can currently not be fused
b1x1 = box1[:, 0].unsqueeze(1) # [N,1]
b1y1 = box1[:, 1].unsqueeze(1)
b1x2 = box1[:, 2].unsqueeze(1)
b1y2 = box1[:, 3].unsqueeze(1)
b2x1 = box2[:, 0].unsqueeze(0) # [1,N]
b2y1 = box2[:, 1].unsqueeze(0)
b2x2 = box2[:, 2].unsqueeze(0)
b2y2 = box2[:, 3].unsqueeze(0)
s = self.checkScript(iou, (b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2))
self.assertAllFused(s.graph_for(b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2),
except_for={'aten::size', 'prim::BroadcastSizes', 'aten::_size_if_not_equal'})
with enable_profiling_mode_for_profiling_tests(True):
c = s(b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2)
warmup_backward(c.sum(), [b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2])
graph = backward_graph(s)
self.assertAllFused(graph, except_for={'aten::size', 'prim::BroadcastSizes', 'aten::_size_if_not_equal'})
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
@enable_cpu_fuser
def test_fusion_reuse_multi_gpu(self):
def fn(x, y):
return x * y * x * y
inputs_cpu = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float),
]
inputs_cuda0 = [x.cuda(0) for x in inputs_cpu]
inputs_cuda1 = [y.cuda(1) for y in inputs_cpu]
# Should not crash; these should compile different kernels.
ge = self.checkScript(fn, inputs_cpu)
self.assertAllFused(ge.graph_for(*inputs_cpu))
ge(*inputs_cuda0)
ge(*inputs_cuda1)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
@enable_cpu_fuser
def test_kernel_cache_multi_gpu(self):
def not_fusible(x):
return x
def fn(x, y, z):
x_out = x * x * x * x * x # fusion: lambda x. x * x * x * x * x
y_out = y * y * y * y * y
z_out = z * z * z * z * z
return not_fusible(x_out), not_fusible(y_out), not_fusible(z_out)
inputs = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float, device='cuda:0'),
torch.randn(4, 4, dtype=torch.float, device='cuda:1'),
]
prev_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# There are 3 FusionGroups. Because they have the same graph, they
# should reuse the same KernelSpec in the KernelSpec cache.
ge = self.checkScript(fn, inputs)
self.assertGraphContainsExactly(
ge.graph_for(*inputs), 'prim::FusionGroup', 3, True)
new_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# XXX: This assumes that the same kernel isn't already used by another test
self.assertEqual(new_cache_size - prev_cache_size, 1)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_nonzero_device_cuda(self):
device = 'cuda:' + str(1)
x = torch.tensor([0.4], dtype=torch.float, device=device)
y = torch.tensor([0.7], dtype=torch.float, device=device)
def doit(x, y):
return torch.sigmoid(torch.tanh(x * (x + y) + x))
ge = self.checkTrace(doit, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_lstm_cuda(self):
inputs = get_lstm_inputs('cuda', training=True)
module = self.checkScript(LSTMCellS, inputs)
return
forward_graph = module.graph_for(*inputs)
self.assertGraphContainsExactly(
forward_graph, 'prim::FusionGroup', 1, consider_subgraphs=True)
self.assertTrue(len(strip_profiling_nodes(forward_graph.nodes())) == 2)
# Everything is differentiable but TupleConstruct return
FileCheck().check("DifferentiableGraph").check_next("TupleConstruct") \
.check_next("return").run(str(forward_graph))
with enable_profiling_mode_for_profiling_tests(True):
hy, cy = module(*inputs)
warmup_backward((hy + cy).sum())
backward = backward_graph(module)
self.assertAllFused(backward, except_for=("aten::t", "aten::mm",
"aten::_grad_sum_to_size"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
# By default, on Ampere or later GPUs, LSTM computes float tensors at TF32 precision.
# We want float tensors to be computed at full precision in order to use the default precision
@with_tf32_off
def test_lstm_concat_cuda(self):
inputs = get_lstm_inputs('cuda')
ge = self.checkTrace(LSTMCellC, inputs)
graph = ge.graph_for(*inputs)
FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_lstm_gates_permutations_cuda(self):
# lstm has gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh.
# Test that any permutation of this will still result in one FusionGroup.
choices = ['x.mm(w_ih.t())', 'hx.mm(w_hh.t())', 'b_ih', 'b_hh']
template = dedent('''
def cell(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
gates = {} + {} + {} + {}
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
return ingate * forgetgate * cellgate * outgate
''')
for permutation in permutations(choices, len(choices)):
code = template.format(*permutation)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
inputs = get_lstm_inputs('cuda', training=False)
self.assertEqual(cu.cell(*inputs), scope['cell'](*inputs))
forward_graph = cu.cell.graph_for(*inputs)
self.assertGraphContainsExactly(forward_graph, 'prim::FusionGroup', 1)
# TODO: Fuser doesn't work at all when inputs require grad. Fix that
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
# By default, on Ampere or later GPUs, LSTM computes float tensors at TF32 precision.
# We want float tensors to be computed at full precision in order to use the default precision
@with_tf32_off
def test_lstm_traced_cuda(self):
inputs = get_lstm_inputs('cuda')
ge = self.checkTrace(LSTMCellF, inputs)
graph = ge.graph_for(*inputs)
# .check_not("aten::add") don't get pulled into FusionGroup because of BailOuts
FileCheck().check_not("Chunk").check_not("aten::sigmoid") \
.check_not("aten::tanh").check("FusionGroup").check_next("TupleConstruct") \
.check_next("return").check_not("FusionGroup_2").run(str(graph))
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/8746")
@enable_cpu_fuser
def test_lstm_traced_cpu(self):
inputs = get_lstm_inputs('cpu')
try:
ge = self.checkTrace(LSTMCellF, inputs)
graph = ge.graph_for(*inputs)
FileCheck().check("FusionGroup").run(str(graph))
except RuntimeError as e:
if 'Failed to compile' in e.args[0]:
warnings.warn('CPU fuser test has failed! This is not a hard failure, '
'because the kernels sometimes trigger bugs in compilers '
'(most notably GCC 7.2).')
raise unittest.SkipTest('Failed to compile') from e
else:
raise
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_milstm_cuda(self):
inputs = get_milstm_inputs('cuda', training=True)
module = self.checkScript(MiLSTMCell, inputs)
forward_graph = module.graph_for(*inputs)
self.assertGraphContainsExactly(
forward_graph, 'prim::FusionGroup', 1, consider_subgraphs=True)
FileCheck().check("DifferentiableGraph").check_next("TupleConstruct") \
.check_next("return").check("FusionGroup").run(str(forward_graph))
hy, cy = module(*inputs)
warmup_backward((hy + cy).sum())
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "borked on the legacy executor")
def test_rand_cuda(self):
class M(torch.jit.ScriptModule):
__constants__ = ['d']
def __init__(self):
super().__init__()
self.d = torch.device('cuda')
@torch.jit.script_method
def create(self, x):
return x * x + x + torch.rand_like(x)
x = torch.zeros([3, 4, 5], dtype=torch.float, device='cuda')
m = M()
out1 = m.create(x)
out2 = m.create(x)
self.assertNotEqual(out1, out2)
self.assertTrue(torch.all(out1 >= 0))
self.assertTrue(torch.all(out1 < 1))
self.assertTrue(torch.all(out2 >= 0))
self.assertTrue(torch.all(out2 < 1))
self.assertAllFused(m.create.graph_for(x))
@staticmethod
def fn_test_relu(x, y):
return F.relu(x + .5 * y)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_relu_cuda(self):
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(self.fn_test_relu, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_erf_cuda(self):
def fn_test_erf(x):
return F.relu(torch.erf(x) - torch.erfc(x))
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(fn_test_erf, (x,))
self.assertAllFused(ge.graph_for(x))
x.requires_grad_(True)
ge = self.checkTrace(fn_test_erf, (x,))
self.assertAllFused(ge.graph_for(x), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "borked on the legacy executor")
def test_rand_broadcast_cuda(self):
def fn_test_rand(x, y):
r = torch.rand_like(y)
return r * x + x
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
script_f = torch.jit.script(fn_test_rand)
out = script_f(x, y)
self.assertAllFused(script_f.graph_for(x, y))
x.requires_grad_(True)
out = script_f(x, y)
self.assertAllFused(script_f.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
# test that broadcasting random produces correct results
x = torch.ones(4, 4, dtype=torch.float, device='cuda')
y = torch.ones(4, dtype=torch.float, device='cuda')
out = script_f(x, y)
self.assertEqual(out[0], out[1])
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_scalar(self):
def fn(x, y):
return 2 * x + y
x = torch.tensor(0.1, dtype=torch.float, device='cpu')
y = torch.tensor(1, dtype=torch.float, device='cpu')
ge = self.checkScript(fn, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_small_constant_cuda(self):
def fn_test_small_constant(x, y):
return (1e-8 * x + 5e-9 * y) * 1e8
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(fn_test_small_constant, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_tensor_scalar_ops_cuda(self):
def should_fuse(x):
z = 3.
y = x + z
return x * y
# XXX: right now we only support fusing scalars if
# they're constant (#9940)
def should_not_fuse(x, z):
y = x + int(z)
return x * y
inputs = [torch.randn(2, 2, dtype=torch.float, device='cuda')]
ge = self.checkScript(should_fuse, inputs)
self.assertAllFused(ge.graph_for(*inputs))
inputs = [
torch.randn(2, 2, dtype=torch.float, device='cuda'),
torch.tensor(3., dtype=torch.float, device='cuda'),
]
ge = self.checkScript(should_not_fuse, inputs)
self.assertGraphContainsExactly(
ge.graph_for(*inputs), 'prim::FusionGroup', 0, consider_subgraphs=True)
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_where_and_typing(self):
def f(x, y):
mask = x > y
res = torch.where(mask, x, y)
return mask, res
x = torch.randn(4, 4, dtype=torch.double)
y = torch.randn(4, 4, dtype=torch.double)
script_f = self.checkScript(f, (x, y))
self.assertAllFused(script_f.graph_for(x, y), except_for={'prim::TupleConstruct'})
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
def test_grad_sum_to_size_elimination(self):
def my_broadcasted_cell(a, b, c):
return (a + b) + c
s1 = torch.randn(5, 1, requires_grad=True, device='cuda')
s2 = torch.randn(5, 5, requires_grad=True, device='cuda')
module = self.checkScript(my_broadcasted_cell, (s1, s1, s1), profiling=ProfilingMode.PROFILING)
forward_graph = module.graph_for(s1, s1, s1)
self.assertAllFused(forward_graph, except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
old_plans = set()
for i in range(3):
# if we have s2, then the s1 are _grad_sum_to_size'd
args = s2 if i < 1 else s1, s2 if i < 2 else s1, s2
args = [a.detach_().requires_grad_() for a in args]
# recompile, so we don't trigger bailouts
module = self.checkScript(my_broadcasted_cell, args, profiling=ProfilingMode.PROFILING)
res = module(s2 if i < 1 else s1, s2 if i < 2 else s1, s2)
warmup_backward(res.sum(), args)
grads = torch.autograd.grad(res.sum(), args)
for inp, gr in zip(args, grads):
self.assertEqual(inp.shape, gr.shape)
backward = None
# this is a workaround for the backward graphs not being
# in order for Python 2
for g in all_backward_graphs(module):
if str(g) not in old_plans:
assert backward is None
backward = g
old_plans.add(str(backward))
num_grads = 1 if i > 0 else 0
self.assertEqual(len([n for n in backward.nodes() if n.kind() == 'aten::_grad_sum_to_size']), num_grads)
if __name__ == '__main__':
run_tests()
|
def warmup_forward(f, *args):
profiling_count = 2
for i in range(profiling_count):
results = f(*args)
return results
@skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "skip due to SIGIOT failures, #67646")
class TestFuser(JitTestCase):
def assertAllFused(self, graph, except_for=()):
diff_graphs = [n for n in graph.nodes() if n.kind() == 'prim::DifferentiableGraph']
if len(diff_graphs) > 0:
self.assertEqual(len(diff_graphs), 1)
graph = diff_graphs[0].g('Subgraph')
allowed_nodes = {'prim::Constant', 'prim::FusionGroup', 'prim::BailoutTemplate',
'prim::BailOut', 'prim::TupleConstruct'} | set(except_for)
self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()),
f'got {graph}')
self.assertTrue([node.kind() for node in graph.nodes()].count('prim::FusionGroup') == 1)
def _test_fused_abs(self, device='cpu'):
def func(x):
return x.abs() * 2
a = torch.randn(5, device=device)
scripted = self.checkScript(func, (a,))
self.assertAllFused(scripted.graph_for(a))
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_abs_cpu(self):
self._test_fused_abs()
@unittest.skipIf(not IS_WINDOWS, "This is meant to be Windows-specific")
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_abs_cpu_unicode_temp_dir(self):
with TemporaryDirectoryName(suffix='中文') as dname:
shell_env = os.environ.copy()
shell_env['TMP'] = dname
cmd = [sys.executable, os.path.basename(__file__), type(self).__name__ + '.test_abs_cpu']
legacy_jit_flag = '--jit-executor=legacy'
for v in sys.argv:
if v == legacy_jit_flag:
cmd.append(legacy_jit_flag)
return_code = shell(cmd, cwd=os.path.dirname(__file__), env=shell_env)
self.assertEqual(return_code, 0)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_abs_cuda(self):
self._test_fused_abs(device="cuda")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_zero_element_tensors(self):
def decode(sin_t, cos_t):
theta = torch.atan2(sin_t.float(), cos_t.float())
return theta
sin = torch.zeros(0, device="cuda")
cos = torch.zeros(0, device="cuda")
inputs = [sin, cos]
ge = self.checkScript(decode, inputs)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_arg_configurations_smoke_cuda(self):
# A smoke test to make sure we won't use the same kernel for contiguous
# and non-contiguous arguments.
# TODO: add optionally enabled debug counters to the fuser to verify
# that we really can tell the difference between configurations
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
traced_f = torch.jit.trace(f, (x, y,))
self.assertEqual(traced_f(x.t().contiguous(), y), traced_f(x.t(), y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_broadcast_cuda(self):
def scaleshift(x, scale, shift):
return x * scale + shift
inputs = [
torch.randn(4, 4, dtype=torch.float, device='cuda'),
torch.randn(4, dtype=torch.float, device='cuda'),
torch.randn(4, dtype=torch.float, device='cuda'),
]
ge = self.checkTrace(scaleshift, inputs)
self.assertAllFused(ge.graph_for(*inputs))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no bfloat support with profiling on")
def test_cuda_bfloat16(self):
def foo(x, y):
return (x + y).relu()
m = torch.jit.script(foo)
x = torch.randn(65536).cuda().bfloat16()
y = torch.randn_like(x)
self.assertAllFused(m.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_HALF, "no half support")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
def test_cuda_half(self):
x = torch.randn(4, 4, dtype=torch.half, device='cuda')
y = torch.randn(4, 4, dtype=torch.half, device='cuda')
funcs = [
self.fn_test_comparison_gt_lt,
self.fn_test_relu,
self.fn_test_exp
]
# Note: Non fused inputs must be float to prevent loss of precision
inputs = (x.float(), y.float())
fusion_inputs = (x, y)
for fn in funcs:
local_inputs = [t.clone().requires_grad_() for t in inputs]
local_fusion_inputs = [t.clone().requires_grad_() for t in fusion_inputs]
# Verifies outputs
fusion = torch.jit.trace(fn, local_fusion_inputs, check_trace=False)
outputs = fn(*local_inputs)
fusion_outputs = fusion(*local_fusion_inputs)
outputs_half = [t.half() for t in outputs]
self.assertEqual(outputs_half, fusion_outputs)
# Verifies gradients
for output, fusion_output in zip(outputs_half, fusion_outputs):
grads = torch.autograd.grad(
output.float().sum(), local_inputs, allow_unused=True, retain_graph=True)
fusion_grads = torch.autograd.grad(
fusion_output.sum(), local_fusion_inputs, allow_unused=True, retain_graph=True)
grads_half = [t.half() for t in grads]
self.assertEqual(grads_half, fusion_grads)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_checks_cat_inputs(self):
# We shouldn't treat cat nodes as broadcasting. All their inputs
# need to be checked for having the same map size, before we can
# run the kernel.
def f(x, y):
return torch.cat([x + 2 * x + x ** 2, y + 4 * y + y ** 3], dim=0)
# NOTE: y is broadcastable to x, but output of f(x, y) should have
# shape 3x4, and not 4x4.
x = torch.randn(2, 4, dtype=torch.float, device='cuda')
y = torch.randn(1, 4, dtype=torch.float, device='cuda')
scripted = self.checkScript(f, (x, y))
self.assertAllFused(scripted.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_remainder_cuda(self):
def cuda_rem(x, y):
return 1 + torch.remainder(x, y) - 1
a = torch.rand([512], dtype=torch.float).cuda()
b = torch.rand([512], dtype=torch.float).cuda()
inputs = [a, b]
ge = self.checkScript(cuda_rem, inputs)
graph = ge.graph_for(*inputs)
self.assertAllFused(graph)
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_chunk_cuda(self):
def fn(x):
a, b, c = x.chunk(3, 1)
return a * b + c
inputs = [torch.randn(10, 6, dtype=torch.float, device='cuda')]
ge = self.checkScript(fn, inputs)
graph = ge.graph_for(*inputs)
self.assertAllFused(graph)
FileCheck().check("prim::ConstantChunk[chunks=3, dim=1]").run(str(graph))
@staticmethod
def _test_chunk_correctness(self, device='cpu'):
def chunk_4_0(x):
x0, x1, x2, x3 = x.chunk(4, 0)
return x0 + x1 + x2 + x3
def chunk_4_1(x):
x0, x1, x2, x3 = x.chunk(4, 1)
return x0 + x1 + x2 + x3
def chunk_4_last(x):
x0, x1, x2, x3 = x.chunk(4, 2)
return x0 + x1 + x2 + x3
fns = [chunk_4_0, chunk_4_1, chunk_4_last]
tensors = [
# splitSize = 1
torch.randn(4, 4, 4, dtype=torch.float, device=device),
# contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device),
# non-contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device).transpose(1, 2),
]
for tensor in tensors:
for fn in fns:
self.checkScript(fn, [tensor])
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_chunk_correctness(self):
return self._test_chunk_correctness(self, 'cpu')
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_chunk_correctness_cuda(self):
return self._test_chunk_correctness(self, 'cuda')
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_chunk_distributes_cuda(self):
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(f, (x, y))
graph = ge.graph_for(x, y)
FileCheck().check("broadcast_tensors").check('with prim::FusionGroup_') \
.check_count('ConstantChunk', 2, exactly=True).run(str(graph))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_chunk_motion_deduplicates_inputs(self):
def func1(x):
z = x * x
z0, z1 = z.chunk(2)
return z0 * z1
def func2(x):
z = x * x * x
z0, z1 = z.chunk(2)
return z0 * z1
inputs = [
torch.tensor([1.1, 1.2], device='cuda', dtype=torch.float),
]
for func in [func1, func2]:
module = self.checkScript(func, inputs)
forward_graph = module.graph_for(*inputs)
self.assertGraphContainsExactly(forward_graph, 'prim::FusionGroup', 1)
fusion_group = list(forward_graph.nodes())[-1]
self.assertEqual(len(list(fusion_group.inputs())), 1)
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_chunk_multiple_cuda(self):
# The arguments are intentionally used out of order as a test to see
# if the fusion compiler adds extra args in the correct order
def fn(s, x, y, z):
z1, z2 = z.chunk(2, 2)
x1, x2, x3 = x.chunk(3, 1)
y1, y2 = y.chunk(2, 0)
return s + x1 + x2 + x3 + y1 + y2 + z1 + z2
inputs = [
torch.randn(5, 2, 3, dtype=torch.float, device='cuda'),
torch.randn(5, 6, 3, dtype=torch.float, device='cuda'),
torch.randn(10, 2, 3, dtype=torch.float, device='cuda'),
torch.randn(5, 2, 6, dtype=torch.float, device='cuda'),
]
ge = self.checkScript(fn, inputs)
self.assertAllFused(ge.graph_for(*inputs))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_minmax(self):
def tmax(a, b):
return torch.max(2 * a, b)
def tmin(a, b):
return torch.min(2 * a, b)
a = torch.randn(4, 4, dtype=torch.float, device="cuda")
b = torch.randn(4, 4, dtype=torch.float, device="cuda")
nan = torch.tensor(float('nan'), dtype=torch.float, device="cuda")
for f, inputs in product(
(tmax, tmin),
([a, b], [a, nan], [b, nan])):
s = self.checkScript(f, inputs)
self.assertAllFused(s.graph_for(*inputs))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_clamp(self):
def func2(a, b):
return torch.clamp(a + b, min=0, max=2)
def funcInf(a, b):
return torch.clamp(a + b, min=0, max=float('inf'))
def funcOptMin(a, b):
return torch.clamp(a + b, max=2)
def funcOptMax(a, b):
return torch.clamp(a + b, min=0)
a = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
b = torch.randn(4, 4, dtype=torch.float, device='cuda')
nan = torch.tensor(float('nan'), dtype=torch.float, device='cuda')
funcs = (func2, funcInf, funcOptMin, funcOptMax)
for f, inputs in product(funcs, [[a, b], [a, nan]]):
f.__disable_jit_function_caching__ = True
inp1, inp2 = inputs
s = self.checkScript(f, (inp1, inp2), profiling=ProfilingMode.PROFILING)
self.assertAllFused(s.graph_for(inp1, inp2), except_for={'aten::size', 'aten::_size_if_not_equal'})
c = s(inp1, inp2)
with enable_profiling_mode_for_profiling_tests():
warmup_backward(c.sum())
graph = backward_graph(s)
self.assertAllFused(graph, except_for={'aten::Float', 'aten::_grad_sum_to_size'})
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
def test_dropout(self):
def func(x):
x = torch.nn.functional.dropout(x)
return torch.nn.functional.relu(x)
a = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
s = torch.jit.script(func)
c = s(a)
c = s(a)
warmup_backward(c.sum())
# skip_check to skip extra bailout nodes in between
graph = backward_graph(s, skip_check=True)
self.assertAllFused(graph, except_for={'aten::div', 'prim::Constant'})
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_comparison_eq_ne(self):
def f(x, y):
mask = (x == 0).type_as(x)
z = x * mask + y
mask = (x != 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@staticmethod
def fn_test_comparison_gt_lt(x, y):
mask = (x > 0).type_as(x)
z = x * mask + y
mask = (x < 0).type_as(x)
z = z * mask + y
return z
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_comparison_gt_lt_cuda(self):
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(self.fn_test_comparison_gt_lt, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_comparison_ge_le_cuda(self):
def f(x, y):
mask = (x >= 0).type_as(x)
z = x * mask + y
mask = (x <= 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
x.requires_grad_(True)
y.requires_grad_(True)
self.assertAllFused(ge.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_addcmul_cuda(self):
t = torch.randn(1, 4, dtype=torch.float, device='cuda')
t1 = torch.randn(4, 1, dtype=torch.float, device='cuda')
t2 = torch.randn(1, 4, dtype=torch.float, device='cuda')
def foo(t, t1, t2):
return t.addcmul(t + 1, t2, value=0.1)
ge = self.checkTrace(foo, (t, t1, t2), allow_unused=True)
graph = ge.graph_for(t, t1, t2)
self.assertAllFused(graph)
# TODO: We leak CUDA memory here because the traced graph holds onto a
# constant-ified tensor. Since the Python-global CompilationUnit is alive
# until the end of the process, the memory is effectively leaked.
# Removed `_cuda` suffix from this test which disables leak-checking.
# If this is a real problem, we'll need to revisit Torchscript Function
# lifetimes in Python.
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_lerp(self):
start = torch.randn(4, 1, dtype=torch.float, device='cuda')
end = torch.randn(1, 4, dtype=torch.float, device='cuda')
weight = torch.tensor(0.5, dtype=torch.float, device='cuda')
# scalar weight overload
def foo_weight_scalar(start, end):
return torch.lerp(start + 1, end, 0.5)
# tensor weight overload
def foo_weight_tensor(start, end):
return torch.lerp(start + 1, end, weight)
ge_weight_scalar = self.checkTrace(foo_weight_scalar, (start, end))
graph = ge_weight_scalar.graph_for(start, end)
self.assertAllFused(graph)
ge_weight_tensor = self.checkTrace(foo_weight_tensor, (start, end))
graph = ge_weight_tensor.graph_for(start, end)
self.assertAllFused(graph)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_concat_cuda(self):
hx = torch.randn(3, 20, dtype=torch.float, device='cuda')
cx = torch.randn(3, 20, dtype=torch.float, device='cuda')
def foo(hx, cx):
return torch.cat((hx + cx, hx * cx))
ge = self.checkTrace(foo, (hx, cx))
graph = ge.graph_for(hx, cx)
self.assertAllFused(graph)
FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_concat_invariant_cuda(self):
# Invariant: the output of prim::FusedConcat may
# not be an input to any node inside the FusionGroup.
def fn(x, y, z):
x1 = x + y
y1 = x - y
w = torch.cat([x1, y1])
return w + z
x = torch.randn(2, 2, dtype=torch.float, device='cuda')
y = torch.randn(2, 2, dtype=torch.float, device='cuda')
z = torch.randn(4, 2, dtype=torch.float, device='cuda')
ge = self.checkTrace(fn, (x, y, z))
graph = ge.graph_for(x, y, z)
self.assertAllFused(graph, except_for={'aten::add'})
FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@staticmethod
def fn_test_exp(x, y):
return (x + .5 * y).exp()
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_exp_cuda(self):
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(self.fn_test_exp, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "broken with profiling on")
@torch._jit_internal._disable_emit_hooks_decorator
@_inline_everything
def test_fuse_decompose_normalization(self):
class ResLike(torch.jit.ScriptModule):
def __init__(self, norm_module):
super().__init__()
self.nm = norm_module
@torch.jit.script_method
def forward(self, x, y):
return y + torch.relu(self.nm(x))
def test_norm_decompose(nm, in_opt_graph, not_in_opt_graph, in_fusegraph):
model = ResLike(nm).cuda()
model_noopt = ResLike(nm).cuda()
model_noopt.load_state_dict(model.state_dict())
x = torch.randn(2, 16, 8, 8, device='cuda')
y = torch.randn(2, 16, 8, 8, device='cuda')
# FIXME: We need differentiation for CNNs for this optimization to trigger
with torch.no_grad():
out = model(x, y)
graph = model.graph_for(x, y)
rep = str(graph)
with torch.jit.optimized_execution(False):
out_noopt = model_noopt(x, y)
rep_noopt = str(model_noopt.graph_for(x, y))
self.assertEqual(out, out_noopt, atol=3e-5)
# Check that normalization op has really been decomposed
for node_in_graph in in_opt_graph:
self.assertIn(node_in_graph, rep)
for node_not_in_graph in not_in_opt_graph:
self.assertNotIn(node_not_in_graph, rep)
self.assertIn(node_not_in_graph, rep_noopt)
fusion_groups = [node for node in graph.nodes() if node.kind() == 'prim::FusionGroup']
self.assertEqual(len(fusion_groups), 1)
fused_graph = str(fusion_groups[0].g('Subgraph'))
for node_in_fusegraph in in_fusegraph:
self.assertIn(node_in_fusegraph, fused_graph)
# test for batchnorm decompose
bm = nn.BatchNorm2d(16)
test_norm_decompose(bm, ['aten::batch_norm_update_stats'],
['aten::batch_norm('], ['aten::sqrt'])
# test for layernorm decompose
lm = nn.LayerNorm(8)
test_norm_decompose(lm, ['aten::batch_norm_stats'],
['aten::layer_norm('], ['aten::sub', 'aten::mul', 'aten::add'])
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_threshold(self):
def f(x):
return torch.threshold(x, 0, -10) + x + x + x
x = torch.tensor([-1, -0.5, 0, 1, 2, 3], device='cuda')
scripted = self.checkScript(f, (x,))
self.assertAllFused(scripted.graph_for(x))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_scalar_arg_cuda(self):
def fn_test_scalar_arg(x: torch.Tensor, p: float) -> torch.Tensor:
return p * (x * x + x)
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
p = 3
scripted = self.checkScript(fn_test_scalar_arg, (x, p))
self.assertAllFused(scripted.graph_for(x, p))
x.requires_grad_(True)
# use another function otherwise we will bailout
# and won't be able to do fused checks
def fn_test_scalar_arg_requires_grad(x: torch.Tensor, p: float) -> torch.Tensor:
return p * (x * x + x)
scripted = torch.jit.script(fn_test_scalar_arg_requires_grad)
out = scripted(x, p)
self.assertAllFused(scripted.graph_for(x, p), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@unittest.skip("deduplicating introduces aliasing in backward graph's outputs")
@enable_cpu_fuser
def test_fuser_deduplication(self):
# See that fusion kernel outputs are deduplicated when removing _grad_sum_to_size in the fuser's compilation
# see the discussion in PR #14957.
def f(x, y):
return torch.sigmoid(x + y)
b = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
s = self.checkScript(f, (a, b))
self.assertAllFused(s.graph_for(a, b), except_for={
'aten::size', 'aten::_size_if_not_equal', 'prim::BroadcastSizes'})
c = s(a, b)
results = warmup_backward(c.sum(), [a, b])
ga2, gb2 = results.pop()
graph = backward_graph(s)
self.assertAllFused(graph)
# check that a, b share storage, i.e. were generated as a single output in the fuser
self.assertEqual(ga2.data_ptr(), gb2.data_ptr())
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
@unittest.skip("temporarily disabled because fusion was restricted in fixing #22833")
def test_fuser_iou(self):
# This checks if most of Intersection over Union is fused.
# In particular, the backward contains many _grad_sum_to_size.
def iou(b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2):
ltx = torch.max(b1x1, b2x1) # [N,M]
lty = torch.max(b1y1, b2y1)
rbx = torch.min(b1x2, b2x2)
rby = torch.min(b1y2, b2y2)
w = (rbx - ltx).clamp(min=0, max=float('inf')) # [N,M]
h = (rby - lty).clamp(min=0, max=float('inf')) # [N,M]
inter = w * h # [N,M]
area1 = (b1x2 - b1x1) * (b1y2 - b1y1) # [N,1]
area2 = (b2x2 - b2x1) * (b2y2 - b2y1) # [1,M]
iou = inter / (area1 + area2 - inter)
return iou
box1 = torch.randn(5, 4, requires_grad=True)
box2 = torch.randn(5, 4, requires_grad=True)
# unsqueezing can currently not be fused
b1x1 = box1[:, 0].unsqueeze(1) # [N,1]
b1y1 = box1[:, 1].unsqueeze(1)
b1x2 = box1[:, 2].unsqueeze(1)
b1y2 = box1[:, 3].unsqueeze(1)
b2x1 = box2[:, 0].unsqueeze(0) # [1,N]
b2y1 = box2[:, 1].unsqueeze(0)
b2x2 = box2[:, 2].unsqueeze(0)
b2y2 = box2[:, 3].unsqueeze(0)
s = self.checkScript(iou, (b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2))
self.assertAllFused(s.graph_for(b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2),
except_for={'aten::size', 'prim::BroadcastSizes', 'aten::_size_if_not_equal'})
with enable_profiling_mode_for_profiling_tests(True):
c = s(b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2)
warmup_backward(c.sum(), [b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2])
graph = backward_graph(s)
self.assertAllFused(graph, except_for={'aten::size', 'prim::BroadcastSizes', 'aten::_size_if_not_equal'})
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
@enable_cpu_fuser
def test_fusion_reuse_multi_gpu(self):
def fn(x, y):
return x * y * x * y
inputs_cpu = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float),
]
inputs_cuda0 = [x.cuda(0) for x in inputs_cpu]
inputs_cuda1 = [y.cuda(1) for y in inputs_cpu]
# Should not crash; these should compile different kernels.
ge = self.checkScript(fn, inputs_cpu)
self.assertAllFused(ge.graph_for(*inputs_cpu))
ge(*inputs_cuda0)
ge(*inputs_cuda1)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
@enable_cpu_fuser
def test_kernel_cache_multi_gpu(self):
def not_fusible(x):
return x
def fn(x, y, z):
x_out = x * x * x * x * x # fusion: lambda x. x * x * x * x * x
y_out = y * y * y * y * y
z_out = z * z * z * z * z
return not_fusible(x_out), not_fusible(y_out), not_fusible(z_out)
inputs = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float, device='cuda:0'),
torch.randn(4, 4, dtype=torch.float, device='cuda:1'),
]
prev_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# There are 3 FusionGroups. Because they have the same graph, they
# should reuse the same KernelSpec in the KernelSpec cache.
ge = self.checkScript(fn, inputs)
self.assertGraphContainsExactly(
ge.graph_for(*inputs), 'prim::FusionGroup', 3, True)
new_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# XXX: This assumes that the same kernel isn't already used by another test
self.assertEqual(new_cache_size - prev_cache_size, 1)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_nonzero_device_cuda(self):
device = 'cuda:' + str(1)
x = torch.tensor([0.4], dtype=torch.float, device=device)
y = torch.tensor([0.7], dtype=torch.float, device=device)
def doit(x, y):
return torch.sigmoid(torch.tanh(x * (x + y) + x))
ge = self.checkTrace(doit, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_lstm_cuda(self):
inputs = get_lstm_inputs('cuda', training=True)
module = self.checkScript(LSTMCellS, inputs)
return
forward_graph = module.graph_for(*inputs)
self.assertGraphContainsExactly(
forward_graph, 'prim::FusionGroup', 1, consider_subgraphs=True)
self.assertTrue(len(strip_profiling_nodes(forward_graph.nodes())) == 2)
# Everything is differentiable but TupleConstruct return
FileCheck().check("DifferentiableGraph").check_next("TupleConstruct") \
.check_next("return").run(str(forward_graph))
with enable_profiling_mode_for_profiling_tests(True):
hy, cy = module(*inputs)
warmup_backward((hy + cy).sum())
backward = backward_graph(module)
self.assertAllFused(backward, except_for=("aten::t", "aten::mm",
"aten::_grad_sum_to_size"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
# By default, on Ampere or later GPUs, LSTM computes float tensors at TF32 precision.
# We want float tensors to be computed at full precision so the results can be compared at the test's default precision.
@with_tf32_off
def test_lstm_concat_cuda(self):
inputs = get_lstm_inputs('cuda')
ge = self.checkTrace(LSTMCellC, inputs)
graph = ge.graph_for(*inputs)
FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_lstm_gates_permutations_cuda(self):
# lstm has gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh.
# Test that any permutation of this will still result in one FusionGroup.
choices = ['x.mm(w_ih.t())', 'hx.mm(w_hh.t())', 'b_ih', 'b_hh']
template = dedent('''
def cell(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
gates = {} + {} + {} + {}
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
return ingate * forgetgate * cellgate * outgate
''')
for permutation in permutations(choices, len(choices)):
code = template.format(*permutation)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
inputs = get_lstm_inputs('cuda', training=False)
self.assertEqual(cu.cell(*inputs), scope['cell'](*inputs))
forward_graph = cu.cell.graph_for(*inputs)
self.assertGraphContainsExactly(forward_graph, 'prim::FusionGroup', 1)
# TODO: Fuser doesn't work at all when inputs require grad. Fix that
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
# By default, on Ampere or later GPUs, LSTM computes float tensors at TF32 precision.
# We want float tensors to be computed at full precision so the results can be compared at the test's default precision.
@with_tf32_off
def test_lstm_traced_cuda(self):
inputs = get_lstm_inputs('cuda')
ge = self.checkTrace(LSTMCellF, inputs)
graph = ge.graph_for(*inputs)
# .check_not("aten::add") is omitted: adds may not get pulled into the FusionGroup because of BailOuts
FileCheck().check_not("Chunk").check_not("aten::sigmoid") \
.check_not("aten::tanh").check("FusionGroup").check_next("TupleConstruct") \
.check_next("return").check_not("FusionGroup_2").run(str(graph))
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/8746")
@enable_cpu_fuser
def test_lstm_traced_cpu(self):
inputs = get_lstm_inputs('cpu')
try:
ge = self.checkTrace(LSTMCellF, inputs)
graph = ge.graph_for(*inputs)
FileCheck().check("FusionGroup").run(str(graph))
except RuntimeError as e:
if 'Failed to compile' in e.args[0]:
warnings.warn('CPU fuser test has failed! This is not a hard failure, ' # noqa: F821
'because the kernels sometimes trigger bugs in compilers '
'(most notably GCC 7.2).')
raise unittest.SkipTest('Failed to compile') from e
else:
raise
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_milstm_cuda(self):
inputs = get_milstm_inputs('cuda', training=True)
module = self.checkScript(MiLSTMCell, inputs)
forward_graph = module.graph_for(*inputs)
self.assertGraphContainsExactly(
forward_graph, 'prim::FusionGroup', 1, consider_subgraphs=True)
FileCheck().check("DifferentiableGraph").check_next("TupleConstruct") \
.check_next("return").check("FusionGroup").run(str(forward_graph))
hy, cy = module(*inputs)
warmup_backward((hy + cy).sum())
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "borked on the legacy executor")
def test_rand_cuda(self):
class M(torch.jit.ScriptModule):
__constants__ = ['d']
def __init__(self) -> None:
super().__init__()
self.d = torch.device('cuda')
@torch.jit.script_method
def create(self, x):
return x * x + x + torch.rand_like(x)
x = torch.zeros([3, 4, 5], dtype=torch.float, device='cuda')
m = M()
out1 = m.create(x)
out2 = m.create(x)
self.assertNotEqual(out1, out2)
self.assertTrue(torch.all(out1 >= 0))
self.assertTrue(torch.all(out1 < 1))
self.assertTrue(torch.all(out2 >= 0))
self.assertTrue(torch.all(out2 < 1))
self.assertAllFused(m.create.graph_for(x))
@staticmethod
def fn_test_relu(x, y):
return F.relu(x + .5 * y)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_relu_cuda(self):
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(self.fn_test_relu, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_erf_cuda(self):
def fn_test_erf(x):
return F.relu(torch.erf(x) - torch.erfc(x))
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(fn_test_erf, (x,))
self.assertAllFused(ge.graph_for(x))
x.requires_grad_(True)
ge = self.checkTrace(fn_test_erf, (x,))
self.assertAllFused(ge.graph_for(x), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "borked on the legacy executor")
def test_rand_broadcast_cuda(self):
def fn_test_rand(x, y):
r = torch.rand_like(y)
return r * x + x
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
script_f = torch.jit.script(fn_test_rand)
out = script_f(x, y)
self.assertAllFused(script_f.graph_for(x, y))
x.requires_grad_(True)
out = script_f(x, y)
self.assertAllFused(script_f.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
# test that broadcasting random produces correct results
x = torch.ones(4, 4, dtype=torch.float, device='cuda')
y = torch.ones(4, dtype=torch.float, device='cuda')
out = script_f(x, y)
self.assertEqual(out[0], out[1])
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_scalar(self):
def fn(x, y):
return 2 * x + y
x = torch.tensor(0.1, dtype=torch.float, device='cpu')
y = torch.tensor(1, dtype=torch.float, device='cpu')
ge = self.checkScript(fn, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_small_constant_cuda(self):
def fn_test_small_constant(x, y):
return (1e-8 * x + 5e-9 * y) * 1e8
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
ge = self.checkTrace(fn_test_small_constant, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_tensor_scalar_ops_cuda(self):
def should_fuse(x):
z = 3.
y = x + z
return x * y
# XXX: right now we only support fusing scalars if
# they're constant (#9940)
def should_not_fuse(x, z):
y = x + int(z)
return x * y
inputs = [torch.randn(2, 2, dtype=torch.float, device='cuda')]
ge = self.checkScript(should_fuse, inputs)
self.assertAllFused(ge.graph_for(*inputs))
inputs = [
torch.randn(2, 2, dtype=torch.float, device='cuda'),
torch.tensor(3., dtype=torch.float, device='cuda'),
]
ge = self.checkScript(should_not_fuse, inputs)
self.assertGraphContainsExactly(
ge.graph_for(*inputs), 'prim::FusionGroup', 0, consider_subgraphs=True)
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_where_and_typing(self):
def f(x, y):
mask = x > y
res = torch.where(mask, x, y)
return mask, res
x = torch.randn(4, 4, dtype=torch.double)
y = torch.randn(4, 4, dtype=torch.double)
script_f = self.checkScript(f, (x, y))
self.assertAllFused(script_f.graph_for(x, y), except_for={'prim::TupleConstruct'})
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
def test_grad_sum_to_size_elimination(self):
def my_broadcasted_cell(a, b, c):
return (a + b) + c
s1 = torch.randn(5, 1, requires_grad=True, device='cuda')
s2 = torch.randn(5, 5, requires_grad=True, device='cuda')
module = self.checkScript(my_broadcasted_cell, (s1, s1, s1), profiling=ProfilingMode.PROFILING)
forward_graph = module.graph_for(s1, s1, s1)
self.assertAllFused(forward_graph, except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
old_plans = set()
for i in range(3):
# if we have s2, then the s1 are _grad_sum_to_size'd
args = s2 if i < 1 else s1, s2 if i < 2 else s1, s2
args = [a.detach_().requires_grad_() for a in args]
# recompile, so we don't trigger bailouts
module = self.checkScript(my_broadcasted_cell, args, profiling=ProfilingMode.PROFILING)
res = module(s2 if i < 1 else s1, s2 if i < 2 else s1, s2)
warmup_backward(res.sum(), args)
grads = torch.autograd.grad(res.sum(), args)
for inp, gr in zip(args, grads):
self.assertEqual(inp.shape, gr.shape)
backward = None
# this is a workaround for the backward graphs not being
# in order for Python 2
for g in all_backward_graphs(module):
if str(g) not in old_plans:
assert backward is None
backward = g
old_plans.add(str(backward))
num_grads = 1 if i > 0 else 0
self.assertEqual(len([n for n in backward.nodes() if n.kind() == 'aten::_grad_sum_to_size']), num_grads)
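# Conceptual sketch (plain eager tensors, not the JIT op itself) of what
# aten::_grad_sum_to_size does for a broadcast input: the incoming gradient is
# summed back down to that operand's shape.
import torch

grad_out = torch.ones(5, 5)                   # gradient flowing into a broadcast add
grad_s1 = grad_out.sum(dim=1, keepdim=True)   # gradient w.r.t. an input of shape (5, 1)
assert grad_s1.shape == (5, 1)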
if __name__ == '__main__':
run_tests()
|
import unittest
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
from unittest import skipIf
from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, IS_WINDOWS, TemporaryDirectoryName, shell
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, _inline_everything, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward
from textwrap import dedent
from itertools import product, permutations
from torch.testing._internal.common_cuda import with_tf32_off
from test_jit import backward_graph, all_backward_graphs, get_lstm_inputs, get_milstm_inputs, \
LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
|
import unittest
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
from unittest import skipIf
from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, IS_WINDOWS, TemporaryDirectoryName, shell
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, _inline_everything, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward
from textwrap import dedent
from itertools import product, permutations
from torch.testing._internal.common_cuda import with_tf32_off
from test_jit import backward_graph, all_backward_graphs, get_lstm_inputs, get_milstm_inputs, \
LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_jit_fuser_te.py
|
inline_fusion_groups
|
def inline_fusion_groups():
old_inlining = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(True)
try:
yield
finally:
torch._C._debug_set_fusion_group_inlining(old_inlining)
@skipIfTorchDynamo()
class TestTEFuser(JitTestCase):
def setUp(self):
super().setUp()
self.tensorexpr_options = TensorExprTestOptions()
# note: `self.dynamic_shapes` is instantiated in the specializations of this
# class defined below
fusion_strategy = [("DYNAMIC", 20)] if self.dynamic_shapes else [("STATIC", 20)]
self.old_fusion_strategy = torch._C._jit_set_fusion_strategy(fusion_strategy)
self.devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
self.int_dtypes = [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.bool,
]
self.fp_dtypes = [
torch.float16,
torch.float32,
torch.float64,
torch.bfloat16,
]
self.dtypes = self.int_dtypes + self.fp_dtypes
def tearDown(self):
self.tensorexpr_options.restore()
torch._C._jit_set_fusion_strategy(self.old_fusion_strategy)
super().tearDown()
def assertAllFused(self, graph, except_for=None):
except_for = except_for if except_for is not None else set()
# TODO - upstream
guards = "prim::TypeCheck", "prim::RequiresGradCheck", "prim::TensorExprDynamicGuard"
guard_found = False
def autodiff_guard(node):
if node.kind() != "aten::all":
return False
inps = list(node.inputs())
if len(inps) != 1 or inps[0].node().kind() != "prim::ListConstruct":
return False
li_inps = list(inps[0].node().inputs())
for li_inp in li_inps:
if li_inp.node().kind() in ("prim::AutogradAllNonZero", "prim::AutogradAllZero"):
return True
return False
def is_guard(node):
return node.kind() in guards or autodiff_guard(node)
for node in graph.block().nodes():
if node.kind() == "prim::Constant":
continue
if is_guard(node):
self.assertFalse(guard_found)
guard_found = True
continue
if node.kind() in except_for:
continue
if node.kind() == "prim::If":
self.assertTrue(is_guard(node.prev()))
continue
self.assertTrue(False, "Found unexpected node: " + node.kind())
self.assertTrue(guard_found)
def assertLastGraphAllFused(self):
self.assertAllFused(torch.jit.last_executed_optimized_graph())
def findFusionGroups(self, graph):
result = []
for n in graph.nodes():
if n.kind() == FUSION_GROUP:
result.append(n.g('Subgraph'))
continue
for block in n.blocks():
result += self.findFusionGroups(block)
return result
def test_typecheck(self):
a = torch.ones(1)
def fused_kernel(a, b):
return (a + b) * 2.
scripted = self.checkScript(fused_kernel, (a, a))
graph = scripted.graph_for(a, a)
# double check we fused
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 1)
# now use a bigger tensor (size 2); if the type check fails to trigger a
# recompilation, we would keep running the kernel specialized for size 1
a = torch.ones(2)
# the shape changed: without a recompilation we would silently compute the
# wrong result
self.assertEqual(scripted(a, a), fused_kernel(a, a))
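# Illustrative sketch (assumptions about executor behavior, not an API contract):
# under the profiling executor a scripted function is guarded on the observed
# input types/shapes, so a call with a new shape is re-profiled or falls back
# instead of silently reusing a kernel specialized for the old shape.
import torch

@torch.jit.script
def doubled(x):
    return (x + x) * 2.

for _ in range(3):
    doubled(torch.ones(1))   # warm up: specialize for size-1 inputs
doubled(torch.ones(2))       # a new shape still produces the correct result
print(torch.jit.last_executed_optimized_graph())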
def test_sum_simple(self):
def func(x):
x2 = x * x
return x2.sum()
with texpr_reductions_enabled():
a = torch.tensor(list(range(0, 15)), dtype=torch.float, device='cpu')
a = a.reshape(5, 3)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_nop(self):
pass
def test_sum_dim(self):
def func(x):
return x.sum((0, )) * 2
def func_neg(x):
return x.sum((-2, )) * 2
with texpr_reductions_enabled():
a = torch.tensor(list(range(0, 15)), dtype=torch.float, device='cpu')
a = a.reshape(5, 3)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
scripted = self.checkScript(func_neg, (a,))
self.assertLastGraphAllFused()
def test_sum_keepdim_cast(self):
def func(x):
return x.sum((0, ), keepdim=True, dtype=torch.double) * 2
with texpr_reductions_enabled():
a = torch.tensor(list(range(0, 15)), dtype=torch.float, device='cpu')
a = a.reshape(5, 3)
self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_abs(self):
for device in self.devices:
def func(x):
return x.abs() * 2
a = torch.randn(5, device=device)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_unsqueeze_size_calculation(self):
for device in self.devices:
def foo(b, d):
x = d.unsqueeze(1)
y = x * 42.
z = b + y
r = z / 42.
return r
inputs = (torch.rand(20, 28, device=device, requires_grad=True), torch.rand(20, device=device))
scripted = self.checkScript(foo, inputs)
self.assertAllFused(scripted.graph_for(*inputs))
def test_zero_element_tensors(self):
for device in self.devices:
def decode(sin_t, cos_t):
theta = torch.atan2(sin_t.float(), cos_t.float())
return theta
sin = torch.zeros(0, device=device)
cos = torch.zeros(0, device=device)
inputs = [sin, cos]
ge = self.checkScript(decode, inputs)
def test_arg_configurations_smoke(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
# A smoke test to make sure we won't use the same kernel for contiguous
# and non-contiguous arguments.
# TODO: add optionally enabled debug counters to the fuser to verify
# that we really can tell the difference between configurations
for device in self.devices:
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
traced_f = torch.jit.trace(f, (x, y,))
self.assertEqual(traced_f(x.t().contiguous(), y), traced_f(x.t(), y))
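# Quick illustration of the contiguity distinction this smoke test relies on
# (plain eager tensors; nothing fuser-specific):
import torch

m = torch.randn(4, 4)
assert m.is_contiguous()
assert not m.t().is_contiguous()            # transposed view: same storage, different strides
assert m.t().contiguous().is_contiguous()   # .contiguous() materializes a copy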
def test_broadcast(self):
for device in self.devices:
def scaleshift(x, scale, shift):
return x * scale + shift
inputs = [
torch.randn(4, 4, dtype=torch.float, device=device),
torch.randn(4, dtype=torch.float, device=device),
torch.randn(4, dtype=torch.float, device=device),
]
self.checkScript(scaleshift, inputs)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_HALF, "no half support")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
def test_cuda_half(self):
x = torch.randn(4, 4, dtype=torch.half, device='cuda')
y = torch.randn(4, 4, dtype=torch.half, device='cuda')
funcs = [
self.fn_test_comparison_gt_lt,
self.fn_test_relu,
self.fn_test_exp
]
# Note: Non fused inputs must be float to prevent loss of precision
inputs = (x.float(), y.float())
fusion_inputs = (x, y)
for fn in funcs:
local_inputs = [t.clone().requires_grad_() for t in inputs]
local_fusion_inputs = [t.clone().requires_grad_() for t in fusion_inputs]
# Verifies outputs
fusion = torch.jit.trace(fn, local_fusion_inputs, check_trace=False)
outputs = fn(*local_inputs)
fusion_outputs = fusion(*local_fusion_inputs)
outputs_half = [t.half() for t in outputs]
self.assertEqual(outputs_half, fusion_outputs)
# Verifies gradients
for output, fusion_output in zip(outputs_half, fusion_outputs):
grads = torch.autograd.grad(
output.float().sum(), local_inputs, allow_unused=True, retain_graph=True)
fusion_grads = torch.autograd.grad(
fusion_output.sum(), local_fusion_inputs, allow_unused=True, retain_graph=True)
grads_half = [t.half() for t in grads]
self.assertEqual(grads_half, fusion_grads)
def test_checks_cat_inputs(self):
# single fusion node causes error
with set_fusion_group_inlining(True):
for device in self.devices:
# We shouldn't treat cat nodes as broadcasting. All their inputs
# need to be checked for having the same map size, before we can
# run the kernel.
def f(x, y):
return torch.cat([x + 2 * x + x ** 2, y + 4 * y + y ** 3], dim=0)
# NOTE: y is broadcastable to x, but output of f(x, y) should have
# shape 3x4, and not 4x4.
x = torch.randn(2, 4, dtype=torch.float, device=device)
y = torch.randn(1, 4, dtype=torch.float, device=device)
scripted = self.checkScript(f, (x, y))
self.assertEqual(scripted(x, y).shape, (3, 4))
self.assertAllFused(scripted.graph_for(x, y))
def test_chunk(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def fn(x):
a, b, c = x.chunk(3, 1)
return a * b + c
inputs = [torch.randn(10, 6, dtype=torch.float, device=device)]
self.checkScript(fn, inputs)
self.assertLastGraphAllFused()
def test_chunk_correctness(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def chunk_4_0(x):
x0, x1, x2, x3 = x.chunk(4, 0)
return x0 + x1 + x2 + x3
def chunk_4_1(x):
x0, x1, x2, x3 = x.chunk(4, 1)
return x0 + x1 + x2 + x3
def chunk_4_last(x):
x0, x1, x2, x3 = x.chunk(4, 2)
return x0 + x1 + x2 + x3
fns = [chunk_4_0, chunk_4_1, chunk_4_last]
tensors = [
# splitSize = 1
torch.randn(4, 4, 4, dtype=torch.float, device=device),
# contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device),
# non-contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device).transpose(1, 2),
]
for tensor in tensors:
for fn in fns:
self.checkScript(fn, [tensor])
self.assertLastGraphAllFused()
def test_chunk_distributes(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
graph = ge.graph_for(x, y)
# XXX: The old fuser does broadcast_tensors but the new fuser doesn't.
# FileCheck().check("broadcast_tensors").check('with ' + FUSION_GROUP + '_') \
# .check_count('ConstantChunk', 2, exactly=True).run(str(graph))
FileCheck().check("with " + FUSION_GROUP + "_").check_count(
"ConstantChunk", 1, exactly=True
).run(str(graph))
def test_chunk_motion_deduplicates_inputs(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def func1(x):
z = x * x
z0, z1 = z.chunk(2)
return z0 * z1
def func2(x):
z = x * x * x
z0, z1 = z.chunk(2)
return z0 * z1
inputs = [
torch.tensor([1.1, 1.2], device=device, dtype=torch.float),
]
for func in [func1, func2]:
self.checkScript(func, inputs)
self.assertLastGraphAllFused()
def test_chunk_multiple(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
# The arguments are intentionally used out of order as a test to see
# if the fusion compiler adds extra args in the correct order
def fn(s, x, y, z):
z1, z2 = z.chunk(2, 2)
x1, x2, x3 = x.chunk(3, 1)
y1, y2 = y.chunk(2, 0)
return s + x1 + x2 + x3 + y1 + y2 + z1 + z2
inputs = [
torch.randn(5, 2, 3, dtype=torch.float, device=device),
torch.randn(5, 6, 3, dtype=torch.float, device=device),
torch.randn(10, 2, 3, dtype=torch.float, device=device),
torch.randn(5, 2, 6, dtype=torch.float, device=device),
]
ge = self.checkScript(fn, inputs)
self.assertAllFused(ge.graph_for(*inputs))
def test_minmax(self):
for device in self.devices:
def tmax(a, b):
return torch.max(2 * a, b)
def tmin(a, b):
return torch.min(2 * a, b)
a = torch.randn(4, 4, dtype=torch.float)
b = torch.randn(4, 4, dtype=torch.float)
nan = torch.tensor(float('nan'), dtype=torch.float)
for f, inputs, device in product(
(tmax, tmin),
([a, b], [a, nan], [b, nan]),
self.devices):
inputs = [t.to(device) for t in inputs]
s = self.checkScript(f, inputs)
self.assertAllFused(s.graph_for(*inputs))
def test_clamp(self):
for device in self.devices:
def func2(a, b):
return torch.clamp(a + b, min=0, max=2)
def funcInf(a, b):
return torch.clamp(a + b, min=0, max=float('inf'))
def funcNegInf(a, b):
return torch.clamp(a + b, min=float('-inf'), max=0)
def funcOptMin(a, b):
return torch.clamp(a + b, max=2)
def funcOptMax(a, b):
return torch.clamp(a + b, min=0)
a = torch.randn(4, 4, dtype=torch.float, device=device, requires_grad=True)
b = torch.randn(4, 4, dtype=torch.float, device=device)
nan = torch.tensor(float('nan'), dtype=torch.float, device=device)
funcs = (func2, funcInf, funcNegInf, funcOptMin, funcOptMax)
for f, inputs in product(funcs, [[a, b], [a, nan]]):
inp1, inp2 = inputs
s = self.checkScript(f, (inp1, inp2), profiling=ProfilingMode.PROFILING)
self.assertAllFused(s.graph_for(inp1, inp2), except_for={'aten::size', 'aten::_size_if_not_equal'})
c = s(inp1, inp2)
with enable_profiling_mode_for_profiling_tests():
warmup_backward(c.sum())
graph = backward_graph(s)
self.assertAllFused(graph, except_for={'aten::Float', 'aten::_grad_sum_to_size'}.union(autograd_check_set))
def test_clamp_double(self):
for device in self.devices:
def clamp_double(x, eta: float):
return 1 - x.clamp(eta, 1 - eta)
x = torch.tensor([1.0, 1.0], dtype=torch.double, device=device)
eta = 1e-9
s = self.checkScript(clamp_double, (x, eta), profiling=ProfilingMode.PROFILING, atol=1e-10, rtol=1e-5)
self.assertAllFused(s.graph_for(x, eta), except_for={'aten::sub'})
def test_clamp_int(self):
for device in self.devices:
def clamp_int(x, eta: int):
return x.clamp(0, eta)
x = torch.tensor([1, 1], device=device)
eta = 1 << 32
s = self.checkScript(clamp_int, (x, eta), profiling=ProfilingMode.PROFILING)
self.assertAllFused(s.graph_for(x, eta))
def test_add_bool(self):
sizes = [(1,), (2,), (4, 4)]
for device, size in product(self.devices, sizes):
def f(x, y, z):
return x + y + z
x = torch.randint(0, 2, size, dtype=torch.bool, device=device)
y = torch.randint(0, 2, size, dtype=torch.bool, device=device)
z = torch.randint(0, 2, size, dtype=torch.bool, device=device)
ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
self.assertAllFused(ge.graph_for(x, y, z))
def test_mul_bool(self):
for device in self.devices:
def f(x, y, z):
return x * y * z
x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
z = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
self.assertAllFused(ge.graph_for(x, y, z))
def test_div_bool(self):
for device in self.devices:
def f(x, y, z):
return (x + y) / z
x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
z = torch.ones_like(x, dtype=torch.bool, device=device)
ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
self.assertAllFused(ge.graph_for(x, y, z))
def test_bitwise_ops(self):
def apply(fn):
return lambda x, y, z: fn(fn(x, y), z)
binary_ops = [
operator.__and__,
operator.__or__,
operator.__xor__,
operator.__lshift__,
operator.__rshift__,
]
devices = self.devices
for dtype, op, device in product(self.int_dtypes, binary_ops, devices):
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
z = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_minmax_int_ops(self):
def apply(fn):
return lambda x, y, z: fn(fn(x, y), z)
binary_ops = [
torch.min,
torch.max
]
devices = self.devices
for dtype, op, device in product(self.int_dtypes, binary_ops, devices):
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
z = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_comparison_eq_ne(self):
for device in self.devices:
def f(x, y):
mask = (x == 0).type_as(x)
z = x * mask + y
mask = (x != 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@staticmethod
def fn_test_comparison_gt_lt(x, y):
mask = (x > 0).type_as(x)
z = x * mask + y
mask = (x < 0).type_as(x)
z = z * mask + y
return z
def test_comparison_gt_lt(self):
for device in self.devices:
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(self.fn_test_comparison_gt_lt, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_comparison_ge_le(self):
for device in self.devices:
def f(x, y):
mask = (x >= 0).type_as(x)
z = x * mask + y
mask = (x <= 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
x.requires_grad_(True)
y.requires_grad_(True)
self.assertAllFused(ge.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
def test_addcmul(self):
for device in self.devices:
t = torch.randn(1, 4, dtype=torch.float, device=device)
t1 = torch.randn(4, 1, dtype=torch.float, device=device)
t2 = torch.randn(1, 4, dtype=torch.float, device=device)
def foo(t, t1, t2):
return t.addcmul(t + 1, t2, value=0.1)
ge = self.checkTrace(foo, (t, t1, t2), allow_unused=True)
graph = ge.graph_for(t, t1, t2)
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 1)
FileCheck().check("aten::add(").check("aten::addcmul(").run(str(fusion_groups[0]))
# TODO: We leak CUDA memory here because the traced graph holds onto a
# constant-ified tensor. Since the Python-global CompilationUnit is alive
# until the end of the process, the memory is effectively leaked.
# Removed `_cuda` suffix from this test which disables leak-checking.
# If this is a real problem, we'll need to revisit Torchscript Function
# lifetimes in Python.
def test_lerp(self):
for device in self.devices:
start = torch.randn(4, 1, dtype=torch.float, device=device)
end = torch.randn(1, 4, dtype=torch.float, device=device)
weight = torch.tensor(0.5, dtype=torch.float, device=device)
# scalar weight overload
def foo_weight_scalar(start, end):
return torch.lerp(start + 1, end, 0.5)
# tensor weight overload
def foo_weight_tensor(start, end):
return torch.lerp(start + 1, end, weight)
ge_weight_scalar = self.checkTrace(foo_weight_scalar, (start, end))
graph = ge_weight_scalar.graph_for(start, end)
self.assertAllFused(graph)
# TODO: uncomment when TE enables support for scalar tensors
# ge_weight_tensor = self.checkTrace(foo_weight_tensor, (start, end))
# graph = ge_weight_tensor.graph_for(start, end)
# self.assertAllFused(graph)
def test_concat(self):
# disabling concat causes error with single concat node
with set_fusion_group_inlining(True):
for device in self.devices:
hx = torch.randn(3, 20, dtype=torch.float, device=device)
cx = torch.randn(3, 20, dtype=torch.float, device=device)
def foo(hx, cx):
return torch.cat((hx + cx, hx * cx))
ge = self.checkTrace(foo, (hx, cx))
graph = ge.graph_for(hx, cx)
self.assertAllFused(graph)
# XXX: TE fuser can handle concats in a fusion group.
# FileCheck().check("FusedConcat").check_next("return").run(str(graph))
def test_remove_output_used_only_in_size(self):
for device in self.devices:
def test_fuse(a, b):
c = a + b
d = c + b
return d
scripted_f = torch.jit.script(test_fuse)
x = torch.ones(1, requires_grad=True, device=device)
y = torch.ones(1, requires_grad=True, device=device)
warmup_forward(scripted_f, x, y, profiling_count=3)
g = scripted_f.graph_for(x, y)
diff_nodes = g.findAllNodes('prim::DifferentiableGraph')
self.assertEqual(len(diff_nodes), 1)
g = diff_nodes[0].g('Subgraph')
if_nodes = [n for n in g.nodes() if n.kind() == 'prim::If']
self.assertEqual(len(if_nodes), 1)
# the if node and the fusion group inside it should only have one output
self.assertEqual(len(list(if_nodes[0].outputs())), 1)
def test_concat_invariant(self):
for device in self.devices:
# Invariant: the output of prim::FusedConcat may
# not be an input to any node inside the FusionGroup.
def fn(x, y, z):
x1 = x + y
y1 = x - y
w = torch.cat([x1, y1])
return w + z
x = torch.randn(2, 2, dtype=torch.float, device=device)
y = torch.randn(2, 2, dtype=torch.float, device=device)
z = torch.randn(4, 2, dtype=torch.float, device=device)
ge = self.checkTrace(fn, (x, y, z))
graph = ge.graph_for(x, y, z)
self.assertAllFused(graph, except_for={'aten::add'})
# XXX: TE fuser can handle concats inside a fusion group.
# FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@staticmethod
def fn_test_exp(x, y):
return (x + .5 * y).exp()
def test_exp(self):
for device in self.devices:
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(self.fn_test_exp, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_threshold(self):
for device in self.devices:
def f(x):
return torch.threshold(x, 0, -10) + x + x + x
x = torch.tensor([-1, -0.5, 0, 1, 2, 3], device=device)
scripted = self.checkScript(f, (x,))
self.assertAllFused(scripted.graph_for(x))
def test_scalar_arg(self):
for device in self.devices:
def fn_test_scalar_arg(x: torch.Tensor, p: float) -> torch.Tensor:
return p * (x * x + x)
x = torch.randn(4, 4, dtype=torch.float, device=device)
p = 3
scripted = self.checkScript(fn_test_scalar_arg, (x, p))
self.assertAllFused(scripted.graph_for(x, p))
x.requires_grad_(True)
# use another function otherwise we will bailout
# and won't be able to do fused checks
def fn_test_scalar_arg_requires_grad(x: torch.Tensor, p: float) -> torch.Tensor:
return p * (x * x + x)
scripted = torch.jit.script(fn_test_scalar_arg_requires_grad)
out = scripted(x, p)
out = scripted(x, p)
out = scripted(x, p)
self.assertAllFused(scripted.graph_for(x, p), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_fusion_reuse_multi_gpu(self):
def fn(x, y):
return x * y * x * y
inputs_cpu = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float),
]
inputs_cuda0 = [x.cuda(0) for x in inputs_cpu]
inputs_cuda1 = [y.cuda(1) for y in inputs_cpu]
# Should not crash; these should compile different kernels.
ge = self.checkScript(fn, inputs_cpu)
self.assertAllFused(ge.graph_for(*inputs_cpu))
ge(*inputs_cuda0)
ge(*inputs_cuda1)
# TODO: we're currently not checking 'device' in the type info when pulling
# nodes into a fusion group. We should fix that and re-enable this test.
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_kernel_cache_multi_gpu(self):
def not_fusible(x):
return x
def fn(x, y, z):
x_out = x * x * x * x * x # fusion: lambda x. x * x * x * x * x
y_out = y * y * y * y * y
z_out = z * z * z * z * z
return not_fusible(x_out), not_fusible(y_out), not_fusible(z_out)
inputs = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float, device='cuda:0'),
torch.randn(4, 4, dtype=torch.float, device='cuda:1'),
]
prev_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# There are 3 FusionGroups. Because they have the same graph, they
# should reuse the same KernelSpec in the KernelSpec cache.
ge = self.checkScript(fn, inputs)
self.assertGraphContainsExactly(
ge.graph_for(*inputs), FUSION_GROUP, 3, True)
new_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# XXX: This assumes that the same kernel isn't already used by another test
# FIXME: Use the TE fuser's way of querying the cache.
# self.assertEqual(new_cache_size - prev_cache_size, 1)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_nonzero_device_cuda(self):
device = 'cuda:' + str(1)
x = torch.tensor([0.4], dtype=torch.float, device=device)
y = torch.tensor([0.7], dtype=torch.float, device=device)
def doit(x, y):
return torch.sigmoid(torch.tanh(x * (x + y) + x))
ge = self.checkTrace(doit, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_lstm(self):
for device in self.devices:
inputs = get_lstm_inputs(device, training=True)
module = self.checkScript(LSTMCellS, inputs)
self.assertAllFused(module.graph_for(*inputs), except_for={"prim::TupleConstruct"})
def test_lstm_concat(self):
# single fusion node causes error
with set_fusion_group_inlining(True):
for device in self.devices:
inputs = get_lstm_inputs(device)
ge = self.checkTrace(LSTMCellC, inputs)
graph = ge.graph_for(*inputs)
except_nodes = {"prim::TupleConstruct", "aten::linear"}
# TODO... Chunk
if self.dynamic_shapes:
except_nodes = except_nodes.union({"aten::add", "prim::ConstantChunk"})
self.assertAllFused(ge.graph_for(*inputs), except_for=except_nodes)
# XXX: TE fuser can handle concats inside a fusion group.
# FileCheck().check("FusedConcat").check_next("return").run(str(graph))
def test_lstm_gates_permutations(self):
for device in self.devices:
# lstm has gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh.
# Test that any permutation of this will still result in one FusionGroup.
choices = ['x.mm(w_ih.t())', 'hx.mm(w_hh.t())', 'b_ih', 'b_hh']
template = dedent('''
def cell(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
gates = {} + {} + {} + {}
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
return ingate * forgetgate * cellgate * outgate
''')
for permutation in permutations(choices, len(choices)):
code = template.format(*permutation)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
fusion_group_len = 2 if self.dynamic_shapes else 1
inputs = get_lstm_inputs(device, training=False)
self.assertEqual(cu.cell(*inputs), scope['cell'](*inputs))
forward_graph = cu.cell.graph_for(*inputs)
self.assertGraphContainsExactly(forward_graph, FUSION_GROUP, fusion_group_len)
# TODO: Fuser doesn't work at all when inputs require grad. Fix that
def test_lstm_traced(self):
for device in self.devices:
inputs = get_lstm_inputs(device)
ge = self.checkTrace(LSTMCellF, inputs)
graph = ge.graph_for(*inputs)
fusion_groups = self.findFusionGroups(graph)
# TODO: chunk
fusion_group_len = 2 if self.dynamic_shapes else 1
self.assertEqual(len(fusion_groups), fusion_group_len)
f = FileCheck()
if not self.dynamic_shapes:
f.check("Chunk")
f.check("aten::sigmoid").check("aten::tanh").run(str(fusion_groups[0 if not self.dynamic_shapes else 1]))
def test_milstm(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
for device in self.devices:
inputs = get_milstm_inputs(device, training=True)
module = self.checkScript(MiLSTMCell, inputs)
forward_graph = module.graph_for(*inputs)
# TODO: chunk
fusion_group_len = 2 if self.dynamic_shapes else 1
self.assertGraphContainsExactly(
forward_graph, FUSION_GROUP, fusion_group_len, consider_subgraphs=True)
FileCheck().check("DifferentiableGraph").check("TupleConstruct") \
.check_next("return").check(FUSION_GROUP).run(str(forward_graph))
hy, cy = module(*inputs)
warmup_backward((hy + cy).sum())
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_cuda(self):
class M(torch.jit.ScriptModule):
__constants__ = ['d']
def __init__(self):
super().__init__()
self.d = torch.device('cuda')
@torch.jit.script_method
def create(self, x):
return x * x + x + torch.rand_like(x)
x = torch.zeros([3, 4, 5], dtype=torch.float, device='cuda')
m = M()
out1 = m.create(x)
out2 = m.create(x)
self.assertNotEqual(out1, out2)
self.assertTrue(torch.all(out1 >= 0))
self.assertTrue(torch.all(out1 < 1))
self.assertTrue(torch.all(out2 >= 0))
self.assertTrue(torch.all(out2 < 1))
self.assertAllFused(m.create.graph_for(x))
@staticmethod
def fn_test_relu(x, y):
return F.relu(x + .5 * y)
def test_relu(self):
for device in self.devices:
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(self.fn_test_relu, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_erf(self):
for device in self.devices:
# only enabled on gpu
if device == 'cpu':
continue
def fn_test_erf(x):
return F.relu(torch.erf(x) - torch.erfc(x))
x = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)
self.assertAllFused(ge.graph_for(x))
x.requires_grad_(True)
ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)
self.assertAllFused(ge.graph_for(x), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_broadcast_cuda(self):
def fn_test_rand(x, y):
r = torch.rand_like(y)
return r * x + x
# If using profiling, a different function is needed to test different
# shapes, or we'll use a cached script.
def fn_test_rand2(x, y):
r = torch.rand_like(y)
return r * x * x
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
script_f = torch.jit.script(fn_test_rand)
warmup_forward(script_f, x, y)
out = script_f(x, y)
self.assertAllFused(script_f.graph_for(x, y))
x.requires_grad_(True)
out = script_f(x, y)
self.assertAllFused(script_f.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
"aten::_size_if_not_equal"))
# test that broadcasting random produces correct results
x = torch.ones(4, 4, dtype=torch.float, device='cuda')
y = torch.ones(4, dtype=torch.float, device='cuda')
script_f = torch.jit.script(fn_test_rand2)
warmup_forward(script_f, x, y)
out = script_f(x, y)
self.assertEqual(out[0, :] + torch.zeros(4, 4, device='cuda'), out)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_diamond(self):
def fn_test_diamond(x, y):
r = torch.rand_like(y)
a = x + r
b = y - r
return a + b
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
script_f = torch.jit.script(fn_test_diamond)
warmup_forward(script_f, x, y)
out = script_f(x, y)
self.assertEqual(out, x + y)
def test_scalar(self):
def fn(x, y):
return 2 * x + y
x = torch.tensor(0.1, dtype=torch.float, device='cpu')
y = torch.tensor(1, dtype=torch.float, device='cpu')
ge = self.checkScript(fn, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_inlined_optimized_graph(self):
@torch.jit.script
def foo(x):
return torch.relu(x + x)
for _ in range(3):
foo(torch.rand([4, 4]))
for _ in range(3):
foo(torch.rand([10]))
for _ in range(3):
foo(torch.rand([2, 2, 2]))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_count("prim::If", 1, exactly=True).check("prim::TensorExpr").run(g)
torch._C._jit_pass_inline(g)
f = FileCheck()
for _ in range(3):
f.check("prim::If").check("prim::TensorExpr")
f.run(g)
def test_small_constant(self):
for device in self.devices:
def fn_test_small_constant(x, y):
return (1e-8 * x + 5e-9 * y) * 1e8
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(fn_test_small_constant, (x, y))
self.assertAllFused(ge.graph_for(x, y))
# Currently we don't pull constants into fusion groups, because in some
# cases it could remove the constant from the original graph and now our
# fusion group needs to return that constant for its other users.
# Instead of never pulling constants into the fusion group, we should just
# be more careful at how we rewrite its users.
# TODO: fix that and reenable the test.
def test_tensor_scalar_ops(self):
for device in self.devices:
def should_fuse(x):
z = 3.
y = x + z
return x * y
def should_fuse_scalar(x, z):
y = x + int(z)
return x * y
inputs = [torch.randn(2, 2, dtype=torch.float, device=device)]
ge = self.checkScript(should_fuse, inputs)
graph = ge.graph_for(*inputs)
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 1)
FileCheck().check("aten::add").check("aten::mul").run(str(fusion_groups[0]))
inputs = [
torch.randn(2, 2, dtype=torch.float, device=device),
torch.tensor(3., dtype=torch.float, device=device),
]
ge = self.checkScript(should_fuse_scalar, inputs)
# Check that the fused graph computes correct results when the scalar
# input changes.
inputs = [
torch.randn(2, 2, dtype=torch.float, device=device),
torch.tensor(7., dtype=torch.float, device=device),
]
self.assertEqual(ge(*inputs), should_fuse_scalar(*inputs))
# The TE fuser supports fusion of non-constant scalars
self.assertGraphContainsExactly(
ge.graph_for(*inputs), FUSION_GROUP, 1, consider_subgraphs=True)
def test_where_and_typing(self):
for device in self.devices:
def f(x, y):
mask = x > y
res = torch.where(mask, x, y)
return mask, res
x = torch.randn(4, 4, dtype=torch.double, device=device)
y = torch.randn(4, 4, dtype=torch.double, device=device)
script_f = self.checkScript(f, (x, y))
self.assertAllFused(script_f.graph_for(x, y), except_for={'prim::TupleConstruct'})
def test_disabled(self):
old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
torch._C._jit_override_can_fuse_on_cpu(False)
def fn(a):
return a ** 2 + a
x = torch.randn(4, dtype=torch.float, device="cpu")
s = self.checkScript(fn, (x,))
g = s.graph_for(x)
self.assertEqual(len(self.findFusionGroups(g)), 0)
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuser_state)
def data_for(self, dtype, device="cuda", size=None):
if size is None:
v = torch.arange(1, 3, dtype=torch.float, device=device)
else:
v = torch.rand(*size, device=device)
if dtype == torch.bool:
return v > 2
elif dtype in [torch.qint8, torch.quint8, torch.qint32]:
return torch.quantize_per_tensor(v, 0.1, 1, dtype=dtype)
else:
return v.to(dtype)
def test_torch_to(self):
# test no op
@torch.jit.script
def foo(x):
return x.to(torch.float)
foo(torch.tensor([3.], dtype=torch.float))
foo(torch.tensor([3.], dtype=torch.float))
FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
# test not fusing non-const inputs
@torch.jit.script
def foo(x, dtype: int):
return x.to(dtype)
foo(torch.tensor([3.], dtype=torch.float), torch.int)
foo(torch.tensor([3.], dtype=torch.float), torch.int)
FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
# test not fusing to_pinned inputs
@torch.jit.script
def foo(x, dtype: int):
return x.to(pin_memory=True)
foo(torch.tensor([3.], dtype=torch.float), torch.int)
foo(torch.tensor([3.], dtype=torch.float), torch.int)
FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
# test across-device not supported
if torch.cuda.is_available():
@torch.jit.script
def foo(x):
return x.to(device="cuda")
foo(torch.tensor([3.], dtype=torch.float))
foo(torch.tensor([3.], dtype=torch.float))
FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
sizes = [(1, 4), (4, 4)]
# reuses cast impl, smaller dtype set for faster test
dtypes = [
torch.bool,
torch.int,
torch.float16,
torch.float32,
torch.float64,
]
class MyMod(torch.nn.Module):
def __init__(self, dtype):
super().__init__()
self.dtype = dtype
def forward(self, x):
return x.to(self.dtype)
bad_dtypes = []
for dtype, output_dtype, device, size in product(dtypes, dtypes, self.devices, sizes):
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
if dtype == output_dtype:
continue
x = self.data_for(dtype, device, size=size)
mod = MyMod(output_dtype)
ref = mod.forward(x)
# use freezing to make non-Tensor args to `to` constant
mod = torch.jit.freeze(torch.jit.script(mod.eval()))
warmup_forward(mod.forward, x)
self.assertEqual(ref, mod.forward(x))
self.assertLastGraphAllFused()
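# Side sketch of the freezing step used above: torch.jit.freeze folds module
# attributes into the graph as constants, which is what lets `x.to(self.dtype)`
# be treated as a fixed cast. The module below is a made-up illustration.
import torch

class Scale(torch.nn.Module):
    def __init__(self, s: float):
        super().__init__()
        self.s = s

    def forward(self, x):
        return x * self.s

frozen = torch.jit.freeze(torch.jit.script(Scale(2.0).eval()))
print(frozen.graph)  # `self.s` should now appear as a constant in the graph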
@unittest.skip("Temporarily disabled")
def test_masked_fill(self):
dtypes = [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
# torch.float16,
torch.float32,
torch.float64,
torch.bool,
]
sizes = [(2,), (4, 4)]
for self_dtype, device, scalar_val, size in product(dtypes, self.devices, [0.4, 3], sizes):
input_v = self.data_for(self_dtype, device, size=size)
mask = self.data_for(torch.bool, device, size=size)
def fn(input_v, mask):
return torch.masked_fill(input_v, mask, scalar_val)
ref = fn(input_v, mask)
try:
t = torch.jit.trace(fn, (input_v, mask))
torch.testing.assert_close(ref, t(input_v, mask))
self.assertLastGraphAllFused()
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(self_dtype), op.__name__, device, str(size)])
) from e
def test_isnan(self):
x = torch.rand([4])
x[0] = float('nan')
inputs = [
x,
torch.tensor([float('nan'), .5])
]
dtypes = [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.float16,
torch.float32,
torch.float64,
torch.bool,
]
for inp, device, dtype in product(inputs, self.devices, dtypes):
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
inp = inp.to(device=device, dtype=dtype)
try:
f = torch.jit.trace(lambda x: x.isnan(), (inp,))
warmup_forward(f, inp)
self.assertEqual(f(inp), inp.isnan())
self.assertLastGraphAllFused()
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), 'isnan', device])
) from e
def test_gelu(self):
def apply(fn):
return lambda x, approximate: fn(x, approximate)
unary_ops = [
F.gelu,
]
sizes = [(1,), (2,), (4, 4)]
for dtype, op, device, size in product(self.dtypes, unary_ops, self.devices, sizes):
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device, size=size)
cond = self.data_for(torch.bool, device)
fn = apply(op)
ref = fn(x, cond)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, cond))
torch.testing.assert_close(ref, t(x, cond))
self.assertAllFused(t.graph_for(x, cond))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device, str(size)])
) from e
def test_unary_ops(self):
with torch._jit_internal._disable_emit_hooks():
def apply(fn):
return lambda x: fn(x)
unary_ops = [
torch.lgamma,
torch.sigmoid,
torch.reciprocal,
torch.neg,
torch.relu,
F.relu6,
torch.log,
torch.log10,
torch.log1p,
torch.log2,
torch.exp,
torch.expm1,
torch.erf,
torch.erfc,
torch.cos,
torch.sin,
torch.tan,
torch.acos,
torch.asin,
torch.cosh,
torch.sinh,
torch.atan,
torch.tanh,
F.hardtanh,
F.hardsigmoid,
F.hardswish,
F.softplus,
F.silu,
F.mish,
F.elu,
torch.sqrt,
torch.rsqrt,
torch.abs,
# TODO broken on int8 since
# https://github.com/pytorch/pytorch/pull/85144
# RuntimeError: Invalid integral op_type: 23
# torch.ceil,
# torch.floor,
# torch.round,
# torch.trunc,
torch.frac,
# TODO: broken on ROCm?
# F.hardshrink,
F.leaky_relu,
lambda x: torch.threshold(x, 0, -10),
# TODO: broken since type promotion was added
# lambda x: torch.clamp(x, -10, 10),
]
gpu_only = {torch.erf, torch.erfc}
sizes = [(1,), (2,), (4, 4)]
for dtype, op, device, size in product(self.dtypes, unary_ops, self.devices, sizes):
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
# todo - re-enable. fails with .500
if dtype == torch.bfloat16 and op == torch.round:
continue
if op in gpu_only and device == "cpu":
continue
try:
x = self.data_for(dtype, device, size=size)
fn = apply(op)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x,))
torch.testing.assert_close(ref, t(x))
self.assertAllFused(t.graph_for(x))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device, str(size)])
) from e
def test_binary_ops(self):
def apply(fn):
return lambda x, y: fn(x, y)
binary_ops = [
operator.__and__,
operator.__or__,
operator.__xor__,
torch.add,
torch.sub,
torch.mul,
torch.min,
torch.max,
lambda x, y: torch.lerp(x, y, 0.5),
torch.atan2,
torch.div,
torch.eq,
torch.ne,
torch.ge,
torch.gt,
torch.lt,
torch.fmod,
torch.remainder,
lambda x, y: y.type_as(x),
]
fp_only = [
torch.fmod,
torch.remainder,
]
devices = self.devices
for dtype, op, device in product(self.dtypes, binary_ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y))
self.assertEqual(ref, t(x, y))
if op not in fp_only or dtype.is_floating_point:
self.assertAllFused(t.graph_for(x, y))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_binary_scalar_ops(self):
def apply(fn):
return lambda x, y: fn(x, y)
ir_template = """
graph(%x : {dtype_x}, %y : {dtype_y}):
%z = {op}(%x, %y)
return (%z)"""
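# For illustration (not part of the test logic): with dtype_x='int',
# dtype_y='float' and op='aten::add', the template above expands to roughly:
#   graph(%x : int, %y : float):
#     %z = aten::add(%x, %y)
#     return (%z)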
binary_ops = [
"aten::mul",
"aten::add",
"aten::sub",
"aten::div",
"aten::lt",
"aten::le",
"aten::eq",
"aten::ne",
"aten::gt",
"aten::ge",
"aten::__or__",
"aten::__xor__",
"aten::__and__",
"aten::__lshift__",
"aten::__rshift__",
]
dtypes = ['int', 'float', 'bool']
values = {'int' : [10, 3], 'float' : [12.34, 2.78], 'bool' : [True, False]}
devices = self.devices
for dtype_x, dtype_y, op, device in product(dtypes, dtypes, binary_ops, devices):
code = ir_template.format(**locals())
# Interpret the graph
try:
graph = torch._C.parse_ir(code)
for x, y in product(values[dtype_x], values[dtype_y]):
ref = torch._C._jit_interpret_graph(graph, (x, y))
except Exception:
# If we can't interpret this IR, don't bother checking NNC.
continue
# Compile the graph
try:
k = torch._C._te.TensorExprKernel(graph)
except Exception as e:
raise RuntimeError(" ".join(["Compilation failed:", device, str(code)])) from e
# Run the graph
for x, y in product(values[dtype_x], values[dtype_y]):
ref = torch._C._jit_interpret_graph(graph, (x, y))
try:
res = k.run((x, y))
self.assertEqual(ref, res)
except Exception as e:
raise RuntimeError(" ".join(["Failed at runtime:", device, str(x), str(y), str(code)])) from e
def test_matmul(self):
if self.dynamic_shapes:
self.skipTest("don't run matmul with dynamic shapes")
def fn(x, y):
return torch.matmul(x, y)
devices = ['cpu'] # No cuda support for ext calls yet
sizes = [[[128, 128], [128, 128]],
[[10, 10], [10, 10]],
[[1, 16], [16, 128]],
[[128], [128]],
[[128], [128, 128]],
[[3], [3]],
[[3, 4], [4]],
[[10, 3, 4], [4]],
[[10, 3, 4], [10, 4, 5]],
[[10, 3, 4], [4, 5]],
]
# Only 2D x 2D matrix multiply is supported. For non-supported sizes we
# still want to run results verification to test that we didn't
# accidentally fuse it, but we skip the 'is-fused' check.
# TODO: add support for other shape combinations and make this set empty:
skip_is_fused_check_sizes = ["[[128], [128]]",
"[[128], [128, 128]]",
"[[3], [3]]",
"[[3, 4], [4]]",
"[[10, 3, 4], [4]]",
"[[10, 3, 4], [10, 4, 5]]",
"[[10, 3, 4], [4, 5]]",
]
for dtype, size, device in product(self.dtypes, sizes, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
size_x, size_y = size
x = self.data_for(dtype, device, size=size_x)
y = self.data_for(dtype, device, size=size_y)
ref = fn(x, y)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y))
t(x, y)
self.assertEqual(ref, t(x, y))
if str(size) not in skip_is_fused_check_sizes:
self.assertAllFused(t.graph_for(x, y))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), device])
) from e
def test_binary_tensor_scalar_ops(self):
with torch._jit_internal._disable_emit_hooks():
def apply_with_scalar(fn, scalar):
return lambda x: fn(x, scalar)
# FIXME: Fails in IR Eval: torch.int64 and_ cpu
binary_ops = [
operator.__and__,
operator.__or__,
operator.__xor__,
torch.add,
torch.sub,
torch.mul,
torch.eq,
torch.ne,
torch.ge,
torch.lt,
torch.gt,
]
devices = self.devices
# Maybe we should split this into separate tests to speed it up by
# only using scalar values relevant to particular ops
scalars = [1.5, 3, 0, -2.0, -1]
for dtype, op, device, scalar in product(self.dtypes, binary_ops, devices, scalars):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
fn = apply_with_scalar(op, scalar)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x,))
self.assertEqual(ref, t(x))
self.assertAllFused(t.graph_for(x))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_binary_div_ops(self):
def apply_with_scalar(fn, scalar):
return lambda x: fn(x, scalar)
binary_ops = [
torch.div,
torch.remainder,
torch.fmod,
]
devices = self.devices
# Maybe we should split this into separate tests to speed it up by
# only using scalar values relevant to particular ops
scalars = [1.5, 3, -2.0, -1] # skip 0
for dtype, op, device, scalar in product(self.dtypes, binary_ops, devices, scalars):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
fn = apply_with_scalar(op, scalar)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x,))
self.assertEqual(ref, t(x))
except Exception as e:
raise RuntimeError(
"Failed: {} {} {} {}".format(dtype, op.__name__, device, scalar)
) from e
def test_binary_pow(self):
def apply_with_scalar(fn, scalar):
return lambda x: fn(x, scalar)
dtypes = [
# FIXME: 'pow' fails with dtype=torch.float16/device=cuda/scalar=0
# torch.float16,
torch.float32,
torch.float64,
# torch.bool intentionally not included
]
binary_ops = [
torch.pow,
]
# Maybe we should split this into separate tests to speed it up by
# only using scalar values relevant to particular ops
scalars = [1.5, 3, 0, -2.0, -1]
for dtype, op, device, scalar in product(dtypes, binary_ops, self.devices, scalars):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
fn = apply_with_scalar(op, scalar)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x,))
self.assertEqual(ref, t(x))
self.assertAllFused(t.graph_for(x))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_ternary_ops(self):
def apply(fn):
return lambda x, y, z: fn(x, y, z)
ternary_ops = [
torch.lerp,
torch.addcmul,
]
devices = self.devices
for dtype, op, device in product(self.dtypes, ternary_ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
z = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_ternary_norm_ops(self):
def apply(fn):
return lambda x, y, z: fn(x, y, z)
ternary_ops = [
F.batch_norm,
]
devices = self.devices
for dtype, op, device in product(self.dtypes, ternary_ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device, size=[5, 3, 128, 128])
y = self.data_for(dtype, device, size=[3])
z = self.data_for(dtype, device, size=[3])
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
@unittest.skip("FIXME: fuser doesn't include ListConstruct nodes to the group causing a failure")
def test_list_ops(self):
def apply(fn):
return lambda x, y, z: fn([x * x, y * y, z * z])
devices = self.devices
list_ops = [
torch.cat,
]
for dtype, op, device in product(self.dtypes, list_ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device, size=[5, 4, 1, 7])
y = self.data_for(dtype, device, size=[5, 4, 1, 7])
z = self.data_for(dtype, device, size=[5, 4, 1, 7])
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_where_ops(self):
def apply(fn):
return lambda cond, x, y: fn(cond, x, y)
ops = [
torch.where,
lambda cond, x, y: torch.where(cond, x, 3.1415),
lambda cond, x, y: torch.where(cond, 42, y),
]
devices = self.devices
for dtype, op, device in product(self.dtypes, ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
cond = self.data_for(torch.bool, device)
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
fn = apply(op)
ref = fn(cond, x, y)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (cond, x, y))
self.assertEqual(ref, t(cond, x, y))
self.assertAllFused(t.graph_for(cond, x, y))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_unsupported_dtypes(self):
for device in self.devices:
def fn(x):
return x * x + x
unsupported_dtypes = [
torch.uint8,
torch.complex32,
torch.complex64,
torch.complex128,
torch.qint8,
torch.quint8,
torch.qint32,
]
for dtype in unsupported_dtypes:
try:
x = self.data_for(dtype, device)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
t = torch.jit.trace(fn, (x,))
self.assertEqual(ref, t(x))
self.assertEqual(len(self.findFusionGroups(t.graph_for(x))), 0)
def test_superslomo(self):
devices = self.devices.copy()
if not LLVM_ENABLED:
devices.remove("cpu")
for device in devices:
# Test extracted from Super-SloMo: https://github.com/avinashpaliwal/Super-SloMo
# A few interesting things happen here: strided inputs of mixed size,
# plus outputs of mixed shapes. The latter characteristic happened to
# expose a memory corruption bug due to not properly guarding the
# outputs.
def eager(t0, t1, t2, t3, t4):
t5 = torch.mul(t0, t4)
t6 = torch.mul(t2, t3)
t7 = torch.mul(t6, t1)
t9 = torch.add(t5, t7)
t11 = torch.add(t0, t6)
ft_p = torch.div(t9, t11)
return (ft_p, t11, t9, t6)
t0 = torch.rand(1, 6, 352, 352, device=device).transpose(0, 1)
t1 = torch.rand(6, 3, 352, 352, device=device)
t2 = torch.rand(6, device=device)[None, None, None, :].permute(3, 0, 1, 2)
t3 = torch.rand(6, 1, 352, 352, device=device)
t4 = torch.rand(6, 3, 352, 352, device=device)
inputs = [t0, t1, t2, t3, t4]
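# Note: t0 is transposed and t2 is permuted above, so two of the five inputs
# are non-contiguous; this exercises the strided-input handling mentioned in
# the comment at the top of this test.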
script = torch.jit.script(eager)
for _ in range(4):
for pair in zip(script(*inputs), eager(*inputs)):
test, ref = pair
torch.testing.assert_close(test, ref)
self.assertAllFused(script.graph_for(*inputs), except_for={"prim::TupleConstruct"})
def test_sub_gt_and(self):
for device in self.devices:
def eager(t1, t2, t3, t4, t: float):
w = t1 - t2
h = t3 - t4
k = (w > t) & (h > t)
assert k.dtype == torch.bool
if t > 0.5:
# Putting a use of k in a never-executed conditional prevents
# profiling its type, which leaves it as "Tensor". If we
# propagate Tensor back to the definition of k, we have to be
# careful not to create a fusion group containing it.
return k + 1
return w
t = torch.rand(8, dtype=torch.float, device=device)
scripted = self.checkScript(eager, (t, t, t, t, 0.1))
def test_chunk_mul_one(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def eager(x):
z, y, w = torch.chunk(x, 3, -1)
return z * 3, y, w
x = torch.rand(64, 1, 3072, dtype=torch.float, device=device)
z, y, w = eager(x)
script = self.checkScript(eager, (x,))
def test_eq_unsqueeze_type_as(self):
for device in self.devices:
def eager(a, b):
mask = b == 1
mask = torch.unsqueeze(mask, -1)
x = mask.type_as(a)
return x, mask
a = torch.rand(1, 64, 1024, device=device, dtype=torch.float)
b = torch.randint(-2, 2, (1, 64), device=device, dtype=torch.long)
script = self.checkScript(eager, (a, b))
def test_neg_pow(self):
def eager_tt(a: torch.Tensor, b: torch.Tensor):
return torch.neg(torch.pow(a, b))
def eager_ts(a: torch.Tensor, b: float):
return torch.neg(torch.pow(a, b))
def eager_st(a: float, b: torch.Tensor):
return torch.neg(torch.pow(a, b))
a = torch.rand(1, dtype=torch.float)
b = torch.rand(1, dtype=torch.float)
s = b.item()
script = self.checkScript(eager_tt, (a, b))
# TODO: re-enable fusion, which doesn't work right now. just test correctness for now
# self.assertAllFused(script.graph_for(a, b))
script = self.checkScript(eager_ts, (a, s))
# self.assertAllFused(script.graph_for(a, s))
script = self.checkScript(eager_st, (s, b))
# self.assertAllFused(script.graph_for(s, b))
@unittest.skipIf(not LLVM_ENABLED, "Too slow to run with the TE interpreter")
def test_conv2d_depthwise(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
def eager(input, weight, bias):
return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=72)
input = torch.rand((1, 72, 56, 56), dtype=torch.float)
weight = torch.rand((72, 1, 3, 3), dtype=torch.float)
bias = torch.rand((72), dtype=torch.float)
script = self.checkScript(eager, (input, weight, bias))
self.assertAllFused(script.graph_for(input, weight, bias))
def test_conv2d(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
def eager(input, weight, bias):
return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=1)
input = torch.rand((1, 64, 56, 56), dtype=torch.float)
weight = torch.rand((64, 64, 3, 3), dtype=torch.float)
bias = torch.rand((64), dtype=torch.float)
script = self.checkScript(eager, (input, weight, bias))
FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
def test_type_as_cat(self):
with inline_fusion_groups():
def eager(x, y):
return torch.cat((x, y.type_as(x)), dim=1)
dtypes = self.dtypes.copy()
# CPU fuser doesn't support float16.
dtypes.remove(torch.float16)
dtypes.remove(torch.bfloat16)
for dtype1, dtype2 in product(dtypes, dtypes):
x = torch.randint(2, (1, 13,)).to(dtype1)
zero = torch.tensor([[0]]).to(dtype2)
one = torch.tensor([[1]]).to(dtype2)
script = torch.jit.trace(eager, (x, zero))
for _ in range(3):
torch.testing.assert_close(
script(x, zero),
eager(x, zero))
torch.testing.assert_close(
script(x, one),
eager(x, one))
self.assertAllFused(script.graph_for(x, one))
def test_to_device(self):
def eager(x):
return x.to(device="cpu").relu()
x = torch.rand(8)
script = self.checkScript(eager, (x,))
self.assertAllFused(script.graph_for(x))
def test_dims(self):
def eager(x, y):
return x / (y + 0.0001)
x = torch.linspace(-1, 1, 768, dtype=torch.float32).as_strided((1, 1, 768), (768, 1, 1))
y = torch.tensor([[[2.0]]], dtype=torch.float32)
script = self.checkScript(eager, (x, y))
self.assertAllFused(script.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_channels_last_dims_dynamic(self):
def eager(x, y):
return x + (y + 0.0001)
indices = [0, 1, 2, 3]
sets = []
for i in range(0, len(indices) + 1):
for subset in combinations(indices, i):
sets.append(subset)
for set in sets:
size = [2, 3, 4, 5]
for index in set:
size[index] = 1
inp = torch.rand(size).to(memory_format=torch.channels_last).cuda()
with texpr_enable_strategy([("DYNAMIC", 20)]):
foo_s = torch.jit.trace(eager, (inp, inp))
for _ in range(3):
out = foo_s(inp, inp)
out_eager = eager(inp, inp)
self.assertEqual(out_eager, out)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("TensorExpr").run(g)
def test_exhaust_specializations(self):
with texpr_enable_strategy([("STATIC", 1)]):
@torch.jit.script
def foo(x):
return x + x + x
for _ in range(3):
foo(torch.rand([2, 2]))
for _ in range(3):
foo(torch.rand([4, 4, 4]))
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
FileCheck().check_count("TensorExpr", 2, exactly=True).run(g)
def test_unsqueeze_var_dim(self):
def eager(x, y, z: int):
return x * torch.unsqueeze(y, dim=z)
x = torch.rand(4, 4, 64).permute(1, 0, 2)
y = torch.rand(4, 4)
z = 2
script = self.checkScript(eager, (x, y, z))
def _test_fwd_bwd(self, fn):
x = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)
xs = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)
script = torch.jit.script(fn)
for i in range(11):
y = fn(x)
g0 = torch.rand_like(y)
y.backward(g0)
ys = script(xs)
ys.backward(g0)
with torch.no_grad():
x -= 0.1 * x.grad
xs -= 0.1 * xs.grad
x.grad = None
xs.grad = None
torch.testing.assert_close(y, ys)
def test_relu_fwd_bwd(self):
def eager(x):
return torch.relu(x * 1.01)
self._test_fwd_bwd(eager)
def test_hardswish_fwd_bwd(self):
def eager(x):
return F.hardswish(x) * 1.01
self._test_fwd_bwd(eager)
def test_hardsigmoid_fwd_bwd(self):
def eager(x):
return F.hardsigmoid(x) * 1.01
self._test_fwd_bwd(eager)
def test_cat_graph_opt(self):
def foo(x, y, z):
return torch.log(torch.cat([x, y, z]))
self.checkScript(foo, (torch.rand([5, 5]), torch.rand([2, 5]), torch.rand([1, 5])))
# TODO: not sure why the updated graph isn't reflected in last_executed_optimized_graph
self.assertLastGraphAllFused()
def test_dynamic_cat(self):
with inline_fusion_groups():
@torch.jit.script
def repro(xs: List[torch.Tensor], ys: List[torch.Tensor], zs: List[torch.Tensor]):
return [
torch.cat([x, torch.cat([y, z], dim=-1)], dim=-1)
for x, y, z in zip(xs, ys, zs)
]
for _ in range(3):
N = 3
xs = [torch.ones(21) for _ in range(N)]
# Note: concat of ys and zs will have the same size for each
# pair, even though the individual ys and zs do not.
ys = [torch.ones(N - i) for i in range(N)]
zs = [torch.ones(i) for i in range(N)]
repro(xs, ys, zs)
def test_scalar_only_inputs(self):
def eager(b: float):
a = torch.ones(1)
return a * b
script = self.checkScript(eager, (1.0,))
def test_cat_2k_args(self):
with inline_fusion_groups():
def eager(x):
return torch.relu(torch.cat([x for _ in range(2000)]))
x = torch.randn(1)
trace = self.checkTrace(eager, (x,))
fusion_groups = self.findFusionGroups(trace.graph_for(x))
self.assertEqual(len(fusion_groups), 0)
def test_adaptive_avg_pool2d(self):
# TODO: once the adaptive_avg_pool2d is available in OpInfo DB, this
# test should be moved there
with inline_fusion_groups():
def foo1(x):
return torch.nn.functional.adaptive_avg_pool2d(x, (2, 2))
def foo2(x):
return torch.nn.functional.adaptive_avg_pool2d(x, (2))
x = torch.randn(4, 4, 4)
for foo in [foo1, foo2]:
f = torch.jit.trace(foo, (x,))
kernel = torch._C._te.TensorExprKernel(f.graph)
correct_val = f(x)
self.assertEqual(kernel.run((x,)), correct_val)
def test_unrolled_cat(self):
with inline_fusion_groups():
def eager(x):
ret = torch.empty(0)
for i in range(x.shape[0]):
ret = torch.cat([ret, x[i].relu()])
return ret
script = torch.jit.script(eager)
# Warm up with size=1 tensor; since the loop iterates once the
# profile data will be "burned in" assuming size=1, and then
# unrolled.
x = torch.ones(1, 1)
for _ in range(3):
script(x)
torch.testing.assert_close(eager(x), script(x))
# Now when an input hits the unrolled path, it will produce an
# incorrectly-sized tensor, since size=1 has been burned in.
x = torch.ones((8, 1))
torch.testing.assert_close(eager(x), script(x))
def test_batch_norm(self):
def test(fn, args):
trace = torch.jit.trace(fn, args)
self.assertAllFused(trace.graph_for(*args))
# TODO: Are `NaN`'s actually ok here or did this pass silently before, because `equal_nan=True` was the
# default?
torch.testing.assert_close(fn(*args), trace(*args), equal_nan=True)
def bn(i, x):
return torch.batch_norm(i, x, x, x, x, False, 0.1, 1e-4, False).relu()
def bn_no_weight(i, x):
return torch.batch_norm(i, None, x, x, x, False, 0.1, 1e-4, False).relu()
def bn_no_bias(i, x):
return torch.batch_norm(i, x, None, x, x, False, 0.1, 1e-4, False).relu()
def bn_neither(i, x):
return torch.batch_norm(i, None, None, x, x, False, 0.1, 1e-4, False).relu()
for device in self.devices:
i = torch.randn(4, 16, 32, 40, device=device)
x = torch.randn(16, device=device)
for fn in [bn, bn_no_weight, bn_no_bias, bn_neither]:
test(fn, (i, x))
def test_profiler(self):
@torch.jit.script
def test(x, y, z):
return x * y + z
args = [torch.randn(4) for _ in range(3)]
with torch.autograd.profiler.profile() as prof:
for _ in range(3):
test(*args)
self.assertIn("fused_mul_add", prof.table())
def test_skip_grad_in_check(self):
@torch.jit.script
def foo(x):
return (x + 2) / 2
inp = torch.rand([4, 4])
for _ in range(3):
foo(inp)
inp.requires_grad_(True)
with torch.inference_mode():
for _ in range(3):
foo(inp)
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
torch._C._jit_pass_inline(g)
FileCheck().check_count("prim::If", 1, exactly=True).run(g)
def test_dynamic_shapes(self):
from functools import partial
n = 10
gen_tensor = (
lambda n: R(1, n),
lambda n: R(n, n),
lambda n: R(n, n).transpose(0, 1),
lambda n: R(n + 1, n + 1, 2)[:n, n, 0],
lambda n: R(n, n, 2)[:, :, 0],
lambda n: R(n, n + 1, n + 2, n + 3).to(memory_format=torch.channels_last),
)
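# Each generator above yields a differently laid-out tensor for a given n
# (contiguous, transposed, strided slices, channels-last), so the dynamic
# fusion strategy is exercised across layouts as well as sizes.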
with texpr_enable_strategy([("DYNAMIC", 20)]):
def foo(x, y, z):
return torch.sigmoid(torch.tanh(x))
foo.__disable_jit_function_caching__ = True
def fi(x, y, z):
return torch.tanh(x + y)
fi.__disable_jit_function_caching__ = True
def fum(x, y, z):
return torch.tanh(x + y) + z
fum.__disable_jit_function_caching__ = True
funcs = [foo, fi, fum]
with inline_fusion_groups():
for device in self.devices:
I = partial(torch.randint, 0, 100, device=device)
R = partial(torch.randn, device=device)
for i, func in enumerate(funcs):
num_args = i + 1
for j, gen in enumerate(gen_tensor):
inps = (gen(n), gen(n), gen(n))
func_s = torch.jit.trace(func, inps, check_trace=False)
torch._C._jit_pass_erase_shape_information(func_s.graph)
for _ in range(2):
x, y, z = gen(n), gen(n), gen(n)
func_s(x, y, z)
for incr in range(3):
func_s(*[gen(n + 1) for _ in range(3)])
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
torch._C._jit_pass_dce(g)
# We should see only one optimized kernel
FileCheck().check_count("TensorExprDynamicGuard", 1, exactly=True).run(g)
self.assertEqual(func(*inps), func_s(*inps))
gen = gen_tensor[0]
inps = (gen(n), gen(n), gen(n))
foo_s = torch.jit.trace(foo, inps)
torch._C._jit_pass_erase_shape_information(foo_s.graph)
g_prev = None
for gen in gen_tensor:
for i in range(3):
foo_s(*[gen(n + i) for _ in range(3)])
inps = (gen(n), gen(n), gen(n))
self.assertEqual(foo_s(*inps), foo(*inps))
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
torch._C._jit_pass_dce(g)
FileCheck().check_count("TensorExprDynamicGuard", len(gen_tensor), exactly=True).run(g)
@unittest.skipIf(not RUN_CUDA, "half-precision NNC fusion requires CUDA")
def test_autocast_up(self):
def f(x):
y = x._autocast_to_full_precision(True, True)
z = torch.exp(y)
return z
x = torch.rand((2, 2), dtype=torch.half, device="cuda")
scr = torch.jit.script(f)
scr(x)
scr(x)
self.assertLastGraphAllFused()
@unittest.skipIf(not RUN_CUDA, "half-precision NNC fusion requires CUDA")
def test_autocast_down(self):
def f(x):
y = torch.sigmoid(x)
z = y._autocast_to_reduced_precision(True, True, torch.half, torch.half)
return z
x = torch.rand((2, 2), dtype=torch.float, device="cuda")
scr = torch.jit.script(f)
scr(x)
scr(x)
self.assertLastGraphAllFused()
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
def test_to_dtype(self):
def f(x):
y = torch.sigmoid(x)
z = y._autocast_to_reduced_precision(True, True, torch.half, torch.bfloat16)
h = z._autocast_to_full_precision(True, True)
i = h.to(dtype=torch.bfloat16)
j = i.to(dtype=torch.float32)
return j
x = torch.rand((2, 2), dtype=torch.float32)
scr = torch.jit.trace(f, x)
scr(x)
scr(x)
self.assertLastGraphAllFused()
self.assertEqual(f(x), scr(x), atol=4e-3, rtol=4e-3)
bf_x = torch.rand((2, 2), dtype=torch.bfloat16)
bf_scr = torch.jit.trace(f, bf_x)
bf_scr(bf_x)
bf_scr(bf_x)
graph = bf_scr.graph_for(bf_x)
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 2)
self.assertEqual(f(bf_x), bf_scr(bf_x), atol=4e-3, rtol=4e-3)
def test_with_strict_fusion(self):
def success(x):
with torch.jit.strict_fusion():
return x + x + x
scripted = self.checkScript(success, (torch.rand([4]),))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_not("aten::add").check("prim::TensorExprGroup").run(g)
def foo(x):
with torch.jit.strict_fusion():
return x + x + torch.rand([4]) + 3
with self.assertRaises(Exception) as error_out:
foo_s = torch.jit.script(foo)
foo_s(torch.rand([4]))
foo_s(torch.rand([4]))
print(torch.jit.last_executed_optimized_graph())
fc = FileCheck().check("Found unfused operators")
fc.check("aten::rand(SymInt[] size")
fc.check("torch.rand([4]").run(str(error_out.exception))
with warnings.catch_warnings(record=True) as warns:
foo(torch.rand([4]))
FileCheck().check("Only works in script mode").run(str(warns[0]))
def test_autodiff(x):
with torch.jit.strict_fusion():
return torch.rand([4]) + x + x + x
foo_s = torch.jit.script(test_autodiff)
inp = torch.rand([4], requires_grad=True)
with self.assertRaises(Exception) as error_out:
for _ in range(3):
foo_s(inp)
f = FileCheck().check("unfused operators").check("aten::rand")
f.run(str(error_out.exception))
def test_separate_fusions(x, y):
with torch.jit.strict_fusion():
return x + x + x, y + y + y
inp = torch.rand([4], requires_grad=True)
with self.assertRaises(Exception) as error_out:
for _ in range(3):
foo_s = torch.jit.script(test_separate_fusions)
foo_s(inp, inp)
f = FileCheck().check("Found multiple fusions")
f.run(str(error_out.exception))
def test_constant_chunk_shapes(self):
# We had an issue where buildShapeExpressions would fail as show below:
#
# %1 : Tensor = Constant[..] # not supported, we don't build this shape
# %2 : Tensor = Constant[..] # not supported
# %3 : Tensor = aten::add(%1, %2) # inputs not supported, we don't build shape
# ... = prim::ConstantChunk[..](%3) # it forgets to check whether input shapes exist, and fails
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def f(x, y):
r = torch.tensor(4)
z1, z2 = (x + y + r).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
graph = ge.graph_for(x, y)
# make sure that we are actually testing the right scenario
FileCheck().check("with " + FUSION_GROUP + "_").check_count(
"ConstantChunk", 1, exactly=True
).run(str(graph))
f_traced = torch.jit.trace(f, (x, y))
for i in range(4):
# make sure this doesn't error out
res = f_traced(x, y)
self.assertEqual(res, f(x, y))
@unittest.skipIf(not RUN_CUDA_HALF, "half-precision NNC fusion requires CUDA")
def test_pow_multiple_dtype(self):
# https://github.com/pytorch/pytorch/issues/75476
def fn(p: torch.Tensor, gamma: float = 2.0) -> torch.Tensor:
p = torch.sigmoid(p)
result = p ** gamma
return result
x = torch.rand((2, 2), dtype=torch.half, device='cuda')
ref = fn(x)
script_fn = torch.jit.script(fn)
for i in range(4):
res = script_fn(x)
self.assertEqual(ref, res)
class TestTEFuserStatic(TestTEFuser):
dynamic_shapes = False
class TestTEFuserDynamic(TestTEFuser):
dynamic_shapes = True
del TestTEFuser
works_list = [
'__radd__',
'__rdiv__',
'__rmul__',
'__rmod__',
'abs',
'acos',
'add',
'addcmul',
'addmm.decomposed',
'asin',
'atan',
'atan2',
'ceil',
'clamp',
'clamp.scalar',
'contiguous',
'cos',
'cosh',
'div.no_rounding_mode',
'div.true_rounding',
'div.floor_rounding',
'div.trunc_rounding',
'eq',
'erf',
'erfc',
'exp',
'expand',
'expand_as',
'expm1',
'floor',
'fmod',
'fmod.autodiffed',
'ge',
'gt',
'isnan',
'le',
'lerp',
'lgamma',
'log',
'log10',
'log1p',
'log2',
'lt',
'masked_fill',
'max.binary',
'mean',
'min.binary',
'mm',
'mul',
'ne',
'neg',
'nn.functional.hardshrink',
'nn.functional.hardsigmoid',
'nn.functional.hardswish',
'nn.functional.softplus',
'nn.functional.hardtanh',
'nn.functional.leaky_relu',
'nn.functional.relu',
'nn.functional.relu6',
'nn.functional.softsign',
'nn.functional.tanhshrink',
'nn.functional.threshold',
'permute',
'pow',
'reciprocal',
'remainder',
'remainder.autodiffed',
'reshape',
'reshape_as',
'round',
'rsub',
'rsub.rsub_tensor',
'rsqrt',
'sigmoid',
'sign',
'sin',
'sinh',
'sqrt',
'sub',
'sum',
't',
'tan',
'tanh',
'transpose',
'true_divide',
'trunc',
'unsqueeze',
'view',
'view_as',
'where',
'bool',
'byte',
'char',
'double',
'float',
'half',
'int',
'long',
'short',
'bool.channels_last',
'byte.channels_last',
'char.channels_last',
'double.channels_last',
'float.channels_last',
'half.channels_last',
'int.channels_last',
'long.channels_last',
'short.channels_last',
]
known_failures = [
'__rmatmul__',
'frac',
'matmul',
]
# If your OpInfo test causes this test to fail, add it here
skip_ops = [
'conj'
]
|
@contextlib.contextmanager
def inline_fusion_groups():
old_inlining = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(True)
try:
yield
finally:
torch._C._debug_set_fusion_group_inlining(old_inlining)
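# Intended to be used as a context manager, e.g. `with inline_fusion_groups():`,
# to force fusion-group inlining while a test inspects the optimized graph and
# to restore the previous setting afterwards.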
class TestTEFuser(JitTestCase):
def setUp(self):
super().setUp()
self.tensorexpr_options = TensorExprTestOptions()
# note: `self.dynamic_shapes` is instantiated in the specializations of this
# class defined below
fusion_strategy = [("DYNAMIC", 20)] if self.dynamic_shapes else [("STATIC", 20)]
self.old_fusion_strategy = torch._C._jit_set_fusion_strategy(fusion_strategy)
self.devices = ["cpu"] if not torch.cuda.is_available() else ["cpu", "cuda"]
self.int_dtypes = [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.bool,
]
self.fp_dtypes = [
torch.float16,
torch.float32,
torch.float64,
torch.bfloat16,
]
self.dtypes = self.int_dtypes + self.fp_dtypes
def tearDown(self):
self.tensorexpr_options.restore()
torch._C._jit_set_fusion_strategy(self.old_fusion_strategy)
super().tearDown()
def assertAllFused(self, graph, except_for=None):
except_for = except_for if except_for is not None else set()
# TODO - upstream
guards = (
"prim::TypeCheck",
"prim::RequiresGradCheck",
"prim::TensorExprDynamicGuard",
)
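# A fully fused graph is expected to contain exactly one guard node (one of
# the kinds above, or an autodiff guard), the guarded prim::If, constants,
# and any node kinds explicitly allowed via `except_for`.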
guard_found = False
def autodiff_guard(node):
if node.kind() != "aten::all":
return False
inps = list(node.inputs())
if len(inps) != 1 or inps[0].node().kind() != "prim::ListConstruct":
return False
li_inps = list(inps[0].node().inputs())
for li_inp in li_inps:
if li_inp.node().kind() in (
"prim::AutogradAllNonZero",
"prim::AutogradAllZero",
):
return True
return False
def is_guard(node):
return node.kind() in guards or autodiff_guard(node)
for node in graph.block().nodes():
if node.kind() == "prim::Constant":
continue
if is_guard(node):
self.assertFalse(guard_found)
guard_found = True
continue
if node.kind() in except_for:
continue
if node.kind() == "prim::If":
self.assertTrue(is_guard(node.prev()))
continue
self.assertTrue(False, "Found unexpected node: " + node.kind())
self.assertTrue(guard_found)
def assertLastGraphAllFused(self):
self.assertAllFused(torch.jit.last_executed_optimized_graph())
def findFusionGroups(self, graph):
result = []
for n in graph.nodes():
if n.kind() == FUSION_GROUP:
result.append(n.g("Subgraph"))
continue
for block in n.blocks():
result += self.findFusionGroups(block)
return result
def test_typecheck(self):
a = torch.ones(1)
def fused_kernel(a, b):
return (a + b) * 2.0
scripted = self.checkScript(fused_kernel, (a, a))
graph = scripted.graph_for(a, a)
# double check we fused
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 1)
# Use a bigger tensor now (size 2). If the type check fails and no
# recompilation is triggered, the kernel specialized for size 1 would be
# reused and we would silently compute a wrong, size-1 result.
a = torch.ones(2)
self.assertEqual(scripted(a, a), fused_kernel(a, a))
def test_sum_simple(self):
def func(x):
x2 = x * x
return x2.sum()
with texpr_reductions_enabled():
a = torch.tensor(list(range(0, 15)), dtype=torch.float, device="cpu")
a = a.reshape(5, 3)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_nop(self):
pass
def test_sum_dim(self):
def func(x):
return x.sum((0,)) * 2
def func_neg(x):
return x.sum((-2,)) * 2
with texpr_reductions_enabled():
a = torch.tensor(list(range(0, 15)), dtype=torch.float, device="cpu")
a = a.reshape(5, 3)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
scripted = self.checkScript(func_neg, (a,))
self.assertLastGraphAllFused()
def test_sum_keepdim_cast(self):
def func(x):
return x.sum((0,), keepdim=True, dtype=torch.double) * 2
with texpr_reductions_enabled():
a = torch.tensor(list(range(0, 15)), dtype=torch.float, device="cpu")
a = a.reshape(5, 3)
self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_abs(self):
for device in self.devices:
def func(x):
return x.abs() * 2
a = torch.randn(5, device=device)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_unsqueeze_size_calculation(self):
for device in self.devices:
def foo(b, d):
x = d.unsqueeze(1)
y = x * 42.0
z = b + y
r = z / 42.0
return r
inputs = (
torch.rand(20, 28, device=device, requires_grad=True),
torch.rand(20, device=device),
)
scripted = self.checkScript(foo, inputs)
self.assertAllFused(scripted.graph_for(*inputs))
def test_zero_element_tensors(self):
for device in self.devices:
def decode(sin_t, cos_t):
theta = torch.atan2(sin_t.float(), cos_t.float())
return theta
sin = torch.zeros(0, device=device)
cos = torch.zeros(0, device=device)
inputs = [sin, cos]
ge = self.checkScript(decode, inputs)
def test_arg_configurations_smoke(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
# A smoke test to make sure we won't use the same kernel for contiguous
# and non-contiguous arguments.
# TODO: add optionally enabled debug counters to the fuser to verify
# that we really can tell the difference between configurations
for device in self.devices:
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
traced_f = torch.jit.trace(f, (x, y))
self.assertEqual(traced_f(x.t().contiguous(), y), traced_f(x.t(), y))
def test_broadcast(self):
for device in self.devices:
def scaleshift(x, scale, shift):
return x * scale + shift
inputs = [
torch.randn(4, 4, dtype=torch.float, device=device),
torch.randn(4, dtype=torch.float, device=device),
torch.randn(4, dtype=torch.float, device=device),
]
self.checkScript(scaleshift, inputs)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_HALF, "no half support")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on"
)
def test_cuda_half(self):
x = torch.randn(4, 4, dtype=torch.half, device="cuda")
y = torch.randn(4, 4, dtype=torch.half, device="cuda")
funcs = [self.fn_test_comparison_gt_lt, self.fn_test_relu, self.fn_test_exp]
# Note: Non fused inputs must be float to prevent loss of precision
inputs = (x.float(), y.float())
fusion_inputs = (x, y)
for fn in funcs:
local_inputs = [t.clone().requires_grad_() for t in inputs]
local_fusion_inputs = [t.clone().requires_grad_() for t in fusion_inputs]
# Verifies outputs
fusion = torch.jit.trace(fn, local_fusion_inputs, check_trace=False)
outputs = fn(*local_inputs)
fusion_outputs = fusion(*local_fusion_inputs)
outputs_half = [t.half() for t in outputs]
self.assertEqual(outputs_half, fusion_outputs)
# Verifies gradients
for output, fusion_output in zip(outputs_half, fusion_outputs):
grads = torch.autograd.grad(
output.float().sum(),
local_inputs,
allow_unused=True,
retain_graph=True,
)
fusion_grads = torch.autograd.grad(
fusion_output.sum(),
local_fusion_inputs,
allow_unused=True,
retain_graph=True,
)
grads_half = [t.half() for t in grads]
self.assertEqual(grads_half, fusion_grads)
def test_checks_cat_inputs(self):
# single fusion node causes error
with set_fusion_group_inlining(True):
for device in self.devices:
# We shouldn't treat cat nodes as broadcasting. All their inputs
# need to be checked for having the same map size, before we can
# run the kernel.
def f(x, y):
return torch.cat([x + 2 * x + x**2, y + 4 * y + y**3], dim=0)
# NOTE: y is broadcastable to x, but output of f(x, y) should have
# shape 3x4, and not 4x4.
x = torch.randn(2, 4, dtype=torch.float, device=device)
y = torch.randn(1, 4, dtype=torch.float, device=device)
scripted = self.checkScript(f, (x, y))
self.assertEqual(scripted(x, y).shape, (3, 4))
self.assertAllFused(scripted.graph_for(x, y))
def test_chunk(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def fn(x):
a, b, c = x.chunk(3, 1)
return a * b + c
inputs = [torch.randn(10, 6, dtype=torch.float, device=device)]
self.checkScript(fn, inputs)
self.assertLastGraphAllFused()
def test_chunk_correctness(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def chunk_4_0(x):
x0, x1, x2, x3 = x.chunk(4, 0)
return x0 + x1 + x2 + x3
def chunk_4_1(x):
x0, x1, x2, x3 = x.chunk(4, 1)
return x0 + x1 + x2 + x3
def chunk_4_last(x):
x0, x1, x2, x3 = x.chunk(4, 2)
return x0 + x1 + x2 + x3
fns = [chunk_4_0, chunk_4_1, chunk_4_last]
tensors = [
# splitSize = 1
torch.randn(4, 4, 4, dtype=torch.float, device=device),
# contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device),
# non-contiguous case
torch.randn(12, 8, 16, dtype=torch.float, device=device).transpose(
1, 2
),
]
for tensor in tensors:
for fn in fns:
self.checkScript(fn, [tensor])
self.assertLastGraphAllFused()
def test_chunk_distributes(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
graph = ge.graph_for(x, y)
# XXX: The old fuser does broadcast_tensors but the new fuser doesn't.
# FileCheck().check("broadcast_tensors").check('with ' + FUSION_GROUP + '_') \
# .check_count('ConstantChunk', 2, exactly=True).run(str(graph))
FileCheck().check("with " + FUSION_GROUP + "_").check_count(
"ConstantChunk", 1, exactly=True
).run(str(graph))
def test_chunk_motion_deduplicates_inputs(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def func1(x):
z = x * x
z0, z1 = z.chunk(2)
return z0 * z1
def func2(x):
z = x * x * x
z0, z1 = z.chunk(2)
return z0 * z1
inputs = [torch.tensor([1.1, 1.2], device=device, dtype=torch.float)]
for func in [func1, func2]:
self.checkScript(func, inputs)
self.assertLastGraphAllFused()
def test_chunk_multiple(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
# The arguments are intentionally used out of order as a test to see
# if the fusion compiler adds extra args in the correct order
def fn(s, x, y, z):
z1, z2 = z.chunk(2, 2)
x1, x2, x3 = x.chunk(3, 1)
y1, y2 = y.chunk(2, 0)
return s + x1 + x2 + x3 + y1 + y2 + z1 + z2
inputs = [
torch.randn(5, 2, 3, dtype=torch.float, device=device),
torch.randn(5, 6, 3, dtype=torch.float, device=device),
torch.randn(10, 2, 3, dtype=torch.float, device=device),
torch.randn(5, 2, 6, dtype=torch.float, device=device),
]
ge = self.checkScript(fn, inputs)
self.assertAllFused(ge.graph_for(*inputs))
def test_minmax(self):
for device in self.devices:
def tmax(a, b):
return torch.max(2 * a, b)
def tmin(a, b):
return torch.min(2 * a, b)
a = torch.randn(4, 4, dtype=torch.float)
b = torch.randn(4, 4, dtype=torch.float)
nan = torch.tensor(float("nan"), dtype=torch.float)
for f, inputs, device in product(
(tmax, tmin), ([a, b], [a, nan], [b, nan]), self.devices
):
inputs = [t.to(device) for t in inputs]
s = self.checkScript(f, inputs)
self.assertAllFused(s.graph_for(*inputs))
def test_clamp(self):
for device in self.devices:
def func2(a, b):
return torch.clamp(a + b, min=0, max=2)
def funcInf(a, b):
return torch.clamp(a + b, min=0, max=float("inf"))
def funcNegInf(a, b):
return torch.clamp(a + b, min=float("-inf"), max=0)
def funcOptMin(a, b):
return torch.clamp(a + b, max=2)
def funcOptMax(a, b):
return torch.clamp(a + b, min=0)
a = torch.randn(4, 4, dtype=torch.float, device=device, requires_grad=True)
b = torch.randn(4, 4, dtype=torch.float, device=device)
nan = torch.tensor(float("nan"), dtype=torch.float, device=device)
funcs = (func2, funcInf, funcNegInf, funcOptMin, funcOptMax)
for f, inputs in product(funcs, [[a, b], [a, nan]]):
inp1, inp2 = inputs
s = self.checkScript(f, (inp1, inp2), profiling=ProfilingMode.PROFILING)
self.assertAllFused(
s.graph_for(inp1, inp2),
except_for={"aten::size", "aten::_size_if_not_equal"},
)
c = s(inp1, inp2)
with enable_profiling_mode_for_profiling_tests():
warmup_backward(c.sum())
graph = backward_graph(s)
self.assertAllFused(
graph,
except_for={"aten::Float", "aten::_grad_sum_to_size"}.union(
autograd_check_set
),
)
def test_clamp_double(self):
for device in self.devices:
def clamp_double(x, eta: float):
return 1 - x.clamp(eta, 1 - eta)
x = torch.tensor([1.0, 1.0], dtype=torch.double, device=device)
eta = 1e-9
s = self.checkScript(
clamp_double,
(x, eta),
profiling=ProfilingMode.PROFILING,
atol=1e-10,
rtol=1e-5,
)
self.assertAllFused(s.graph_for(x, eta), except_for={"aten::sub"})
def test_clamp_int(self):
for device in self.devices:
def clamp_int(x, eta: int):
return x.clamp(0, eta)
x = torch.tensor([1, 1], device=device)
eta = 1 << 32
s = self.checkScript(clamp_int, (x, eta), profiling=ProfilingMode.PROFILING)
self.assertAllFused(s.graph_for(x, eta))
def test_add_bool(self):
sizes = [(1,), (2,), (4, 4)]
for device, size in product(self.devices, sizes):
def f(x, y, z):
return x + y + z
x = torch.randint(0, 2, size, dtype=torch.bool, device=device)
y = torch.randint(0, 2, size, dtype=torch.bool, device=device)
z = torch.randint(0, 2, size, dtype=torch.bool, device=device)
ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
self.assertAllFused(ge.graph_for(x, y, z))
def test_mul_bool(self):
for device in self.devices:
def f(x, y, z):
return x * y * z
x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
z = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
self.assertAllFused(ge.graph_for(x, y, z))
def test_div_bool(self):
for device in self.devices:
def f(x, y, z):
return (x + y) / z
x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
z = torch.ones_like(x, dtype=torch.bool, device=device)
ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
self.assertAllFused(ge.graph_for(x, y, z))
def test_bitwise_ops(self):
def apply(fn):
return lambda x, y, z: fn(fn(x, y), z)
binary_ops = [
operator.__and__,
operator.__or__,
operator.__xor__,
operator.__lshift__,
operator.__rshift__,
]
devices = self.devices
for dtype, op, device in product(self.int_dtypes, binary_ops, devices):
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
z = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_minmax_int_ops(self):
def apply(fn):
return lambda x, y, z: fn(fn(x, y), z)
binary_ops = [torch.min, torch.max]
devices = self.devices
for dtype, op, device in product(self.int_dtypes, binary_ops, devices):
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
z = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_comparison_eq_ne(self):
for device in self.devices:
def f(x, y):
mask = (x == 0).type_as(x)
z = x * mask + y
mask = (x != 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
@staticmethod
def fn_test_comparison_gt_lt(x, y):
mask = (x > 0).type_as(x)
z = x * mask + y
mask = (x < 0).type_as(x)
z = z * mask + y
return z
def test_comparison_gt_lt(self):
for device in self.devices:
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(self.fn_test_comparison_gt_lt, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_comparison_ge_le(self):
for device in self.devices:
def f(x, y):
mask = (x >= 0).type_as(x)
z = x * mask + y
mask = (x <= 0).type_as(x)
z = z * mask + y
return z
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
self.assertAllFused(ge.graph_for(x, y))
x.requires_grad_(True)
y.requires_grad_(True)
self.assertAllFused(
ge.graph_for(x, y),
except_for=(
"aten::size",
"prim::BroadcastSizes",
"aten::_size_if_not_equal",
),
)
def test_addcmul(self):
for device in self.devices:
t = torch.randn(1, 4, dtype=torch.float, device=device)
t1 = torch.randn(4, 1, dtype=torch.float, device=device)
t2 = torch.randn(1, 4, dtype=torch.float, device=device)
def foo(t, t1, t2):
return t.addcmul(t + 1, t2, value=0.1)
ge = self.checkTrace(foo, (t, t1, t2), allow_unused=True)
graph = ge.graph_for(t, t1, t2)
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 1)
FileCheck().check("aten::add(").check("aten::addcmul(").run(
str(fusion_groups[0])
)
# TODO: We leak CUDA memory here because the traced graph holds onto a
# constant-ified tensor. Since the Python-global CompilationUnit is alive
# until the end of the process, the memory is effectively leaked.
# Removed `_cuda` suffix from this test which disables leak-checking.
# If this is a real problem, we'll need to revisit Torchscript Function
# lifetimes in Python.
def test_lerp(self):
for device in self.devices:
start = torch.randn(4, 1, dtype=torch.float, device=device)
end = torch.randn(1, 4, dtype=torch.float, device=device)
weight = torch.tensor(0.5, dtype=torch.float, device=device)
# scalar weight overload
def foo_weight_scalar(start, end):
return torch.lerp(start + 1, end, 0.5)
# tensor weight overload
def foo_weight_tensor(start, end):
return torch.lerp(start + 1, end, weight)
ge_weight_scalar = self.checkTrace(foo_weight_scalar, (start, end))
graph = ge_weight_scalar.graph_for(start, end)
self.assertAllFused(graph)
# TODO: uncomment when TE enables support for scalar tensors
# ge_weight_tensor = self.checkTrace(foo_weight_tensor, (start, end))
# graph = ge_weight_tensor.graph_for(start, end)
# self.assertAllFused(graph)
def test_concat(self):
# disabling concat causes error with single concat node
with set_fusion_group_inlining(True):
for device in self.devices:
hx = torch.randn(3, 20, dtype=torch.float, device=device)
cx = torch.randn(3, 20, dtype=torch.float, device=device)
def foo(hx, cx):
return torch.cat((hx + cx, hx * cx))
ge = self.checkTrace(foo, (hx, cx))
graph = ge.graph_for(hx, cx)
self.assertAllFused(graph)
# XXX: TE fuser can handle concats in a fusion group.
# FileCheck().check("FusedConcat").check_next("return").run(str(graph))
def test_remove_output_used_only_in_size(self):
for device in self.devices:
def test_fuse(a, b):
c = a + b
d = c + b
return d
scripted_f = torch.jit.script(test_fuse)
x = torch.ones(1, requires_grad=True, device=device)
y = torch.ones(1, requires_grad=True, device=device)
warmup_forward(scripted_f, x, y, profiling_count=3)
g = scripted_f.graph_for(x, y)
diff_nodes = g.findAllNodes("prim::DifferentiableGraph")
self.assertEqual(len(diff_nodes), 1)
g = diff_nodes[0].g("Subgraph")
if_nodes = [n for n in g.nodes() if n.kind() == "prim::If"]
self.assertEqual(len(if_nodes), 1)
# the if node and the fusion group inside it should only have one output
self.assertEqual(len(list(if_nodes[0].outputs())), 1)
def test_concat_invariant(self):
for device in self.devices:
# Invariant: the output of prim::FusedConcat may
# not be an input to any node inside the FusionGroup.
def fn(x, y, z):
x1 = x + y
y1 = x - y
w = torch.cat([x1, y1])
return w + z
x = torch.randn(2, 2, dtype=torch.float, device=device)
y = torch.randn(2, 2, dtype=torch.float, device=device)
z = torch.randn(4, 2, dtype=torch.float, device=device)
ge = self.checkTrace(fn, (x, y, z))
graph = ge.graph_for(x, y, z)
self.assertAllFused(graph, except_for={"aten::add"})
# XXX: TE fuser can handle concats inside a fusion group.
# FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@staticmethod
def fn_test_exp(x, y):
return (x + 0.5 * y).exp()
def test_exp(self):
for device in self.devices:
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(self.fn_test_exp, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_threshold(self):
for device in self.devices:
def f(x):
return torch.threshold(x, 0, -10) + x + x + x
x = torch.tensor([-1, -0.5, 0, 1, 2, 3], device=device)
scripted = self.checkScript(f, (x,))
self.assertAllFused(scripted.graph_for(x))
def test_scalar_arg(self):
for device in self.devices:
def fn_test_scalar_arg(x: torch.Tensor, p: float) -> torch.Tensor:
return p * (x * x + x)
x = torch.randn(4, 4, dtype=torch.float, device=device)
p = 3
scripted = self.checkScript(fn_test_scalar_arg, (x, p))
self.assertAllFused(scripted.graph_for(x, p))
x.requires_grad_(True)
# use another function otherwise we will bailout
# and won't be able to do fused checks
def fn_test_scalar_arg_requires_grad(
x: torch.Tensor, p: float
) -> torch.Tensor:
return p * (x * x + x)
scripted = torch.jit.script(fn_test_scalar_arg_requires_grad)
out = scripted(x, p)
out = scripted(x, p)
out = scripted(x, p)
self.assertAllFused(
scripted.graph_for(x, p),
except_for=(
"aten::size",
"prim::BroadcastSizes",
"aten::_size_if_not_equal",
),
)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_fusion_reuse_multi_gpu(self):
def fn(x, y):
return x * y * x * y
inputs_cpu = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float),
]
inputs_cuda0 = [x.cuda(0) for x in inputs_cpu]
inputs_cuda1 = [y.cuda(1) for y in inputs_cpu]
# Should not crash; these should compile different kernels.
ge = self.checkScript(fn, inputs_cpu)
self.assertAllFused(ge.graph_for(*inputs_cpu))
ge(*inputs_cuda0)
ge(*inputs_cuda1)
# TODO: we're currently not checking 'device' in the type info when pulling
# nodes into a fusion group. We should fix that and re-enable this test.
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_kernel_cache_multi_gpu(self):
def not_fusible(x):
return x
def fn(x, y, z):
x_out = x * x * x * x * x # fusion: lambda x. x * x * x * x * x
y_out = y * y * y * y * y
z_out = z * z * z * z * z
return not_fusible(x_out), not_fusible(y_out), not_fusible(z_out)
inputs = [
torch.randn(4, 4, dtype=torch.float),
torch.randn(4, 4, dtype=torch.float, device="cuda:0"),
torch.randn(4, 4, dtype=torch.float, device="cuda:1"),
]
prev_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# There are 3 FusionGroups. Because they have the same graph, they
# should reuse the same KernelSpec in the KernelSpec cache.
ge = self.checkScript(fn, inputs)
self.assertGraphContainsExactly(ge.graph_for(*inputs), FUSION_GROUP, 3, True)
new_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
# XXX: This assumes that the same kernel isn't already used by another test
# FIXME: Use the TE fuser's way of querying the cache.
# self.assertEqual(new_cache_size - prev_cache_size, 1)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_nonzero_device_cuda(self):
device = "cuda:" + str(1)
x = torch.tensor([0.4], dtype=torch.float, device=device)
y = torch.tensor([0.7], dtype=torch.float, device=device)
def doit(x, y):
return torch.sigmoid(torch.tanh(x * (x + y) + x))
ge = self.checkTrace(doit, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_lstm(self):
for device in self.devices:
inputs = get_lstm_inputs(device, training=True)
module = self.checkScript(LSTMCellS, inputs)
self.assertAllFused(
module.graph_for(inputs), except_for={"prim::TupleConstruct"}
)
def test_lstm_concat(self):
# single fusion node causes error
with set_fusion_group_inlining(True):
for device in self.devices:
inputs = get_lstm_inputs(device)
ge = self.checkTrace(LSTMCellC, inputs)
graph = ge.graph_for(*inputs)
except_nodes = {"prim::TupleConstruct", "aten::linear"}
# TODO... Chunk
if self.dynamic_shapes:
except_nodes = except_nodes.union(
{"aten::add", "prim::ConstantChunk"}
)
self.assertAllFused(ge.graph_for(*inputs), except_for=except_nodes)
# XXX: TE fuser can handle concats inside a fusion group.
# FileCheck().check("FusedConcat").check_next("return").run(str(graph))
def test_lstm_gates_permutations(self):
for device in self.devices:
# lstm has gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh.
# Test that any permutation of this will still result in one FusionGroup.
choices = ["x.mm(w_ih.t())", "hx.mm(w_hh.t())", "b_ih", "b_hh"]
template = dedent(
"""
def cell(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
gates = {} + {} + {} + {}
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
return ingate * forgetgate * cellgate * outgate
"""
)
for permutation in permutations(choices, len(choices)):
code = template.format(*permutation)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
fusion_group_len = 2 if self.dynamic_shapes else 1
inputs = get_lstm_inputs(device, training=False)
self.assertEqual(cu.cell(*inputs), scope["cell"](*inputs))
forward_graph = cu.cell.graph_for(*inputs)
self.assertGraphContainsExactly(
forward_graph, FUSION_GROUP, fusion_group_len
)
# TODO: Fuser doesn't work at all when inputs require grad. Fix that
def test_lstm_traced(self):
for device in self.devices:
inputs = get_lstm_inputs(device)
ge = self.checkTrace(LSTMCellF, inputs)
graph = ge.graph_for(*inputs)
fusion_groups = self.findFusionGroups(graph)
# TODO: chunk
fusion_group_len = 2 if self.dynamic_shapes else 1
self.assertEqual(len(fusion_groups), fusion_group_len)
f = FileCheck()
if not self.dynamic_shapes:
f.check("Chunk")
f.check("aten::sigmoid").check("aten::tanh").run(
str(fusion_groups[0 if not self.dynamic_shapes else 1])
)
def test_milstm(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
for device in self.devices:
inputs = get_milstm_inputs(device, training=True)
module = self.checkScript(MiLSTMCell, inputs)
forward_graph = module.graph_for(*inputs)
# TODO: chunk
fusion_group_len = 2 if self.dynamic_shapes else 1
self.assertGraphContainsExactly(
forward_graph, FUSION_GROUP, fusion_group_len, consider_subgraphs=True
)
FileCheck().check("DifferentiableGraph").check("TupleConstruct").check_next(
"return"
).check(FUSION_GROUP).run(str(forward_graph))
hy, cy = module(*inputs)
warmup_backward((hy + cy).sum())
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_cuda(self):
class M(torch.jit.ScriptModule):
__constants__ = ["d"]
def __init__(self) -> None:
super().__init__()
self.d = torch.device("cuda")
@torch.jit.script_method
def create(self, x):
return x * x + x + torch.rand_like(x)
x = torch.zeros([3, 4, 5], dtype=torch.float, device="cuda")
m = M()
out1 = m.create(x)
out2 = m.create(x)
self.assertNotEqual(out1, out2)
self.assertTrue(torch.all(out1 >= 0))
self.assertTrue(torch.all(out1 < 1))
self.assertTrue(torch.all(out2 >= 0))
self.assertTrue(torch.all(out2 < 1))
self.assertAllFused(m.create.graph_for(x))
@staticmethod
def fn_test_relu(x, y):
return F.relu(x + 0.5 * y)
def test_relu(self):
for device in self.devices:
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(self.fn_test_relu, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_erf(self):
for device in self.devices:
# only enabled on gpu
if device == "cpu":
continue
def fn_test_erf(x):
return F.relu(torch.erf(x) - torch.erfc(x))
x = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)
self.assertAllFused(ge.graph_for(x))
x.requires_grad_(True)
ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)
self.assertAllFused(
ge.graph_for(x),
except_for=(
"aten::size",
"prim::BroadcastSizes",
"aten::_size_if_not_equal",
),
)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_broadcast_cuda(self):
def fn_test_rand(x, y):
r = torch.rand_like(y)
return r * x + x
# If using profiling, a different function is needed to test different
# shapes, or we'll use a cached script.
def fn_test_rand2(x, y):
r = torch.rand_like(y)
return r * x * x
x = torch.randn(4, 4, dtype=torch.float, device="cuda")
y = torch.randn(4, 4, dtype=torch.float, device="cuda")
script_f = torch.jit.script(fn_test_rand)
warmup_forward(script_f, x, y)
out = script_f(x, y)
self.assertAllFused(script_f.graph_for(x, y))
x.requires_grad_(True)
out = script_f(x, y)
self.assertAllFused(
script_f.graph_for(x, y),
except_for=(
"aten::size",
"prim::BroadcastSizes",
"aten::_size_if_not_equal",
),
)
# test that broadcasting random produces correct results
x = torch.ones(4, 4, dtype=torch.float, device="cuda")
y = torch.ones(4, dtype=torch.float, device="cuda")
script_f = torch.jit.script(fn_test_rand2)
warmup_forward(script_f, x, y)
out = script_f(x, y)
self.assertEqual(out[0, :] + torch.zeros(4, 4, device="cuda"), out)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_diamond(self):
def fn_test_diamond(x, y):
r = torch.rand_like(y)
a = x + r
b = y - r
return a + b
x = torch.randn(4, 4, dtype=torch.float, device="cuda")
y = torch.randn(4, 4, dtype=torch.float, device="cuda")
script_f = torch.jit.script(fn_test_diamond)
warmup_forward(script_f, x, y)
out = script_f(x, y)
self.assertEqual(out, x + y)
def test_scalar(self):
def fn(x, y):
return 2 * x + y
x = torch.tensor(0.1, dtype=torch.float, device="cpu")
y = torch.tensor(1, dtype=torch.float, device="cpu")
ge = self.checkScript(fn, (x, y))
self.assertAllFused(ge.graph_for(x, y))
def test_inlined_optimized_graph(self):
@torch.jit.script
def foo(x):
return torch.relu(x + x)
for _ in range(3):
foo(torch.rand([4, 4]))
for _ in range(3):
foo(torch.rand([10]))
for _ in range(3):
foo(torch.rand([2, 2, 2]))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_count("prim::If", 1, exactly=True).check(
"prim::TensorExpr"
).run(g)
torch._C._jit_pass_inline(g)
f = FileCheck()
for _ in range(3):
f.check("prim::If").check("prim::TensorExpr")
f.run(g)
def test_small_constant(self):
for device in self.devices:
def fn_test_small_constant(x, y):
return (1e-8 * x + 5e-9 * y) * 1e8
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(fn_test_small_constant, (x, y))
self.assertAllFused(ge.graph_for(x, y))
# Currently we don't pull constants into fusion groups, because in some
# cases it could remove the constant from the original graph and now our
# fusion group needs to return that constant for its other users.
# Instead of never pulling constants into the fusion group, we should just
# be more careful at how we rewrite its users.
# TODO: fix that and reenable the test.
def test_tensor_scalar_ops(self):
for device in self.devices:
def should_fuse(x):
z = 3.0
y = x + z
return x * y
def should_fuse_scalar(x, z):
y = x + int(z)
return x * y
inputs = [torch.randn(2, 2, dtype=torch.float, device=device)]
ge = self.checkScript(should_fuse, inputs)
graph = ge.graph_for(*inputs)
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 1)
FileCheck().check("aten::add").check("aten::mul").run(str(fusion_groups[0]))
inputs = [
torch.randn(2, 2, dtype=torch.float, device=device),
torch.tensor(3.0, dtype=torch.float, device=device),
]
ge = self.checkScript(should_fuse_scalar, inputs)
# Check that the fused graph computes correct results when the scalar
# input changes.
inputs = [
torch.randn(2, 2, dtype=torch.float, device=device),
torch.tensor(7.0, dtype=torch.float, device=device),
]
self.assertEqual(ge(*inputs), should_fuse_scalar(*inputs))
# The TE fuser supports fusion of non-constant scalars
self.assertGraphContainsExactly(
ge.graph_for(*inputs), FUSION_GROUP, 1, consider_subgraphs=True
)
def test_where_and_typing(self):
for device in self.devices:
def f(x, y):
mask = x > y
res = torch.where(mask, x, y)
return mask, res
x = torch.randn(4, 4, dtype=torch.double, device=device)
y = torch.randn(4, 4, dtype=torch.double, device=device)
script_f = self.checkScript(f, (x, y))
self.assertAllFused(
script_f.graph_for(x, y), except_for={"prim::TupleConstruct"}
)
def test_disabled(self):
old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
torch._C._jit_override_can_fuse_on_cpu(False)
def fn(a):
return a**2 + a
x = torch.randn(4, dtype=torch.float, device="cpu")
s = self.checkScript(fn, (x,))
g = s.graph_for(x)
self.assertEqual(len(self.findFusionGroups(g)), 0)
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuser_state)
def data_for(self, dtype, device="cuda", size=None):
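        # Helper used throughout the dtype-sweep tests below: produces a small
        # sample tensor of the requested dtype/device/size, special-casing bool
        # (via a comparison) and the quantized dtypes (via quantize_per_tensor).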
if size is None:
v = torch.arange(1, 3, dtype=torch.float, device=device)
else:
v = torch.rand(*size, device=device)
if dtype == torch.bool:
return v > 2
elif dtype in [torch.qint8, torch.quint8, torch.qint32]:
return torch.quantize_per_tensor(v, 0.1, 1, dtype=dtype)
else:
return v.to(dtype)
def test_torch_to(self):
# test no op
@torch.jit.script
def foo(x):
return x.to(torch.float)
foo(torch.tensor([3.0], dtype=torch.float))
foo(torch.tensor([3.0], dtype=torch.float))
FileCheck().check_not("TensorExpr").run(
torch.jit.last_executed_optimized_graph()
)
# test not fusing non-const inputs
@torch.jit.script
def foo(x, dtype: int):
return x.to(dtype)
foo(torch.tensor([3.0], dtype=torch.float), torch.int)
foo(torch.tensor([3.0], dtype=torch.float), torch.int)
FileCheck().check_not("TensorExpr").run(
torch.jit.last_executed_optimized_graph()
)
# test not fusing to_pinned inputs
@torch.jit.script
def foo(x, dtype: int):
return x.to(pin_memory=True)
foo(torch.tensor([3.0], dtype=torch.float), torch.int)
foo(torch.tensor([3.0], dtype=torch.float), torch.int)
FileCheck().check_not("TensorExpr").run(
torch.jit.last_executed_optimized_graph()
)
# test across-device not supported
if torch.cuda.is_available():
@torch.jit.script
def foo(x):
return x.to(device="cuda")
foo(torch.tensor([3.0], dtype=torch.float))
foo(torch.tensor([3.0], dtype=torch.float))
FileCheck().check_not("TensorExpr").run(
torch.jit.last_executed_optimized_graph()
)
sizes = [(1, 4), (4, 4)]
# reuses cast impl, smaller dtype set for faster test
dtypes = [
torch.bool,
torch.int,
torch.float16,
torch.float32,
torch.float64,
]
class MyMod(torch.nn.Module):
def __init__(self, dtype):
super().__init__()
self.dtype = dtype
def forward(self, x):
return x.to(self.dtype)
bad_dtypes = []
for dtype, output_dtype, device, size in product(
dtypes, dtypes, self.devices, sizes
):
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
if dtype == output_dtype:
continue
x = self.data_for(dtype, device, size=size)
mod = MyMod(output_dtype)
ref = mod.forward(x)
# use freezing to make non-Tensor args to `to` constant
mod = torch.jit.freeze(torch.jit.script(mod.eval()))
warmup_forward(mod.forward, x)
self.assertEqual(ref, mod.forward(x))
self.assertLastGraphAllFused()
@unittest.skip("Temporarily disabled")
def test_masked_fill(self):
dtypes = [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
# torch.float16,
torch.float32,
torch.float64,
torch.bool,
]
sizes = [(2,), (4, 4)]
for self_dtype, device, scalar_val, size in product(
dtypes, self.devices, [0.4, 3], sizes
):
input_v = self.data_for(self_dtype, device, size=size)
mask = self.data_for(torch.bool, device, size=size)
def fn(input_v, mask):
return torch.masked_fill(input_v, mask, scalar_val)
ref = fn(input_v, mask)
try:
t = torch.jit.trace(fn, (input_v, mask))
torch.testing.assert_close(ref, t(input_v, mask))
self.assertLastGraphAllFused()
except Exception as e:
raise RuntimeError(
" ".join(
[
"Failed:",
str(self_dtype),
                            "masked_fill",
device,
str(size),
]
)
) from e
def test_isnan(self):
x = torch.rand([4])
x[0] = float("nan")
inputs = [x, torch.tensor([float("nan"), 0.5])]
dtypes = [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.float16,
torch.float32,
torch.float64,
torch.bool,
]
for inp, device, dtype in product(inputs, self.devices, dtypes):
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
inp = inp.to(device=device, dtype=dtype)
try:
f = torch.jit.trace(lambda x: x.isnan(), (inp,))
warmup_forward(f, inp)
self.assertEqual(f(inp), inp.isnan())
self.assertLastGraphAllFused()
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), "isnan", device])
) from e
def test_gelu(self):
def apply(fn):
return lambda x, approximate: fn(x, approximate)
unary_ops = [
F.gelu,
]
sizes = [(1,), (2,), (4, 4)]
for dtype, op, device, size in product(
self.dtypes, unary_ops, self.devices, sizes
):
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device, size=size)
cond = self.data_for(torch.bool, device)
fn = apply(op)
ref = fn(x, cond)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, cond))
torch.testing.assert_close(ref, t(x, cond))
self.assertAllFused(t.graph_for(x, cond))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device, str(size)])
) from e
def test_unary_ops(self):
with torch._jit_internal._disable_emit_hooks():
def apply(fn):
return lambda x: fn(x)
unary_ops = [
torch.lgamma,
torch.sigmoid,
torch.reciprocal,
torch.neg,
torch.relu,
F.relu6,
torch.log,
torch.log10,
torch.log1p,
torch.log2,
torch.exp,
torch.expm1,
torch.erf,
torch.erfc,
torch.cos,
torch.sin,
torch.tan,
torch.acos,
torch.asin,
torch.cosh,
torch.sinh,
torch.atan,
torch.tanh,
F.hardtanh,
F.hardsigmoid,
F.hardswish,
F.softplus,
F.silu,
F.mish,
F.elu,
torch.sqrt,
torch.rsqrt,
torch.abs,
# TODO broken on int8 since
# https://github.com/pytorch/pytorch/pull/85144
# RuntimeError: Invalid integral op_type: 23
# torch.ceil,
# torch.floor,
# torch.round,
# torch.trunc,
torch.frac,
# TODO: broken on ROCm?
# F.hardshrink,
F.leaky_relu,
lambda x: torch.threshold(x, 0, -10),
# TODO: broken since type promotion was added
# lambda x: torch.clamp(x, -10, 10),
]
gpu_only = {torch.erf, torch.erfc}
sizes = [(1,), (2,), (4, 4)]
for dtype, op, device, size in product(
self.dtypes, unary_ops, self.devices, sizes
):
# TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
                # TODO: re-enable; bfloat16 round currently fails with .500
if dtype == torch.bfloat16 and op == torch.round:
continue
if op in gpu_only and device == "cpu":
continue
try:
x = self.data_for(dtype, device, size=size)
fn = apply(op)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x,))
torch.testing.assert_close(ref, t(x))
self.assertAllFused(t.graph_for(x))
except Exception as e:
raise RuntimeError(
" ".join(
["Failed:", str(dtype), op.__name__, device, str(size)]
)
) from e
def test_binary_ops(self):
def apply(fn):
return lambda x, y: fn(x, y)
binary_ops = [
operator.__and__,
operator.__or__,
operator.__xor__,
torch.add,
torch.sub,
torch.mul,
torch.min,
torch.max,
lambda x, y: torch.lerp(x, y, 0.5),
torch.atan2,
torch.div,
torch.eq,
torch.ne,
torch.ge,
torch.gt,
torch.lt,
torch.fmod,
torch.remainder,
lambda x, y: y.type_as(x),
]
fp_only = [
torch.fmod,
torch.remainder,
]
devices = self.devices
for dtype, op, device in product(self.dtypes, binary_ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y))
self.assertEqual(ref, t(x, y))
if op not in fp_only or dtype.is_floating_point:
self.assertAllFused(t.graph_for(x, y))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_binary_scalar_ops(self):
def apply(fn):
return lambda x, y: fn(x, y)
ir_template = """
graph(%x : {dtype_x}, %y : {dtype_y}):
%z = {op}(%x, %y)
return (%z)"""
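        # Each op/dtype combination below is instantiated from the IR template,
        # first run through the JIT interpreter to get a reference value, then
        # compiled with TensorExprKernel and compared against that reference.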
binary_ops = [
"aten::mul",
"aten::add",
"aten::sub",
"aten::div",
"aten::lt",
"aten::le",
"aten::eq",
"aten::ne",
"aten::gt",
"aten::ge",
"aten::__or__",
"aten::__xor__",
"aten::__and__",
"aten::__lshift__",
"aten::__rshift__",
]
dtypes = ["int", "float", "bool"]
values = {"int": [10, 3], "float": [12.34, 2.78], "bool": [True, False]}
devices = self.devices
for dtype_x, dtype_y, op, device in product(
dtypes, dtypes, binary_ops, devices
):
code = ir_template.format(**locals())
# Interpret the graph
try:
graph = torch._C.parse_ir(code)
for x, y in product(values[dtype_x], values[dtype_y]):
ref = torch._C._jit_interpret_graph(graph, (x, y))
except Exception:
# If we can't interpret this IR, don't bother checking NNC.
continue
# Compile the graph
try:
k = torch._C._te.TensorExprKernel(graph)
except Exception as e:
raise RuntimeError(
" ".join(["Compilation failed:", device, str(code)])
) from e
# Run the graph
for x, y in product(values[dtype_x], values[dtype_y]):
ref = torch._C._jit_interpret_graph(graph, (x, y))
try:
res = k.run((x, y))
self.assertEqual(ref, res)
except Exception as e:
raise RuntimeError(
" ".join(
["Failed at runtime:", device, str(x), str(y), str(code)]
)
) from e
def test_matmul(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
def fn(x, y):
return torch.matmul(x, y)
devices = ["cpu"] # No cuda support for ext calls yet
sizes = [
[[128, 128], [128, 128]],
[[10, 10], [10, 10]],
[[1, 16], [16, 128]],
[[128], [128]],
[[128], [128, 128]],
[[3], [3]],
[[3, 4], [4]],
[[10, 3, 4], [4]],
[[10, 3, 4], [10, 4, 5]],
[[10, 3, 4], [4, 5]],
]
# Only 2D x 2D matrix multiply is supported. For non-supported sizes we
# still want to run results verification to test that we didn't
# accidentally fuse it, but we skip the 'is-fused' check.
# TODO: add support for other shape combinations and make this set empty:
skip_is_fused_check_sizes = [
"[[128], [128]]",
"[[128], [128, 128]]",
"[[3], [3]]",
"[[3, 4], [4]]",
"[[10, 3, 4], [4]]",
"[[10, 3, 4], [10, 4, 5]]",
"[[10, 3, 4], [4, 5]]",
]
for dtype, size, device in product(self.dtypes, sizes, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
size_x, size_y = size
x = self.data_for(dtype, device, size=size_x)
y = self.data_for(dtype, device, size=size_y)
ref = fn(x, y)
except Exception as e:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y))
t(x, y)
self.assertEqual(ref, t(x, y))
if str(size) not in skip_is_fused_check_sizes:
self.assertAllFused(t.graph_for(x, y))
except Exception as e:
raise RuntimeError(" ".join(["Failed:", str(dtype), device])) from e
def test_binary_tensor_scalar_ops(self):
with torch._jit_internal._disable_emit_hooks():
def apply_with_scalar(fn, scalar):
return lambda x: fn(x, scalar)
# FIXME: Fails in IR Eval: torch.int64 and_ cpu
binary_ops = [
operator.__and__,
operator.__or__,
operator.__xor__,
torch.add,
torch.sub,
torch.mul,
torch.eq,
torch.ne,
torch.ge,
torch.lt,
torch.gt,
]
devices = self.devices
# Maybe we should split this into separate tests to speed it up by
# only using scalar values relevant to particular ops
scalars = [1.5, 3, 0, -2.0, -1]
for dtype, op, device, scalar in product(
self.dtypes, binary_ops, devices, scalars
):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
fn = apply_with_scalar(op, scalar)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x))
self.assertEqual(ref, t(x))
self.assertAllFused(t.graph_for(x))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_binary_div_ops(self):
def apply_with_scalar(fn, scalar):
return lambda x: fn(x, scalar)
binary_ops = [
torch.div,
torch.remainder,
torch.fmod,
]
devices = self.devices
# Maybe we should split this into separate tests to speed it up by
# only using scalar values relevant to particular ops
scalars = [1.5, 3, -2.0, -1] # skip 0
for dtype, op, device, scalar in product(
self.dtypes, binary_ops, devices, scalars
):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
fn = apply_with_scalar(op, scalar)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x))
self.assertEqual(ref, t(x))
except Exception as e:
raise RuntimeError(
f"Failed: {dtype} {op.__name__} {device} {scalar}"
) from e
def test_binary_pow(self):
def apply_with_scalar(fn, scalar):
return lambda x: fn(x, scalar)
dtypes = [
# FIXME: 'pow' fails with dtype=torch.float16/device=cuda/scalar=0
# torch.float16,
torch.float32,
torch.float64,
# torch.bool intentionally not included
]
binary_ops = [
torch.pow,
]
# Maybe we should split this into separate tests to speed it up by
# only using scalar values relevant to particular ops
scalars = [1.5, 3, 0, -2.0, -1]
for dtype, op, device, scalar in product(
dtypes, binary_ops, self.devices, scalars
):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
fn = apply_with_scalar(op, scalar)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x))
self.assertEqual(ref, t(x))
self.assertAllFused(t.graph_for(x))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_ternary_ops(self):
def apply(fn):
return lambda x, y, z: fn(x, y, z)
ternary_ops = [
torch.lerp,
torch.addcmul,
]
devices = self.devices
for dtype, op, device in product(self.dtypes, ternary_ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
z = self.data_for(dtype, device)
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_ternary_norm_ops(self):
def apply(fn):
return lambda x, y, z: fn(x, y, z)
ternary_ops = [
F.batch_norm,
]
devices = self.devices
for dtype, op, device in product(self.dtypes, ternary_ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device, size=[5, 3, 128, 128])
y = self.data_for(dtype, device, size=[3])
z = self.data_for(dtype, device, size=[3])
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
@unittest.skip(
"FIXME: fuser doesn't include ListConstruct nodes to the group causing a failure"
)
def test_list_ops(self):
def apply(fn):
return lambda x, y, z: fn([x * x, y * y, z * z])
devices = self.devices
list_ops = [
torch.cat,
]
for dtype, op, device in product(self.dtypes, list_ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
x = self.data_for(dtype, device, size=[5, 4, 1, 7])
y = self.data_for(dtype, device, size=[5, 4, 1, 7])
z = self.data_for(dtype, device, size=[5, 4, 1, 7])
fn = apply(op)
ref = fn(x, y, z)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y, z))
self.assertEqual(ref, t(x, y, z))
self.assertAllFused(t.graph_for(x, y, z))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_where_ops(self):
def apply(fn):
return lambda cond, x, y: fn(cond, x, y)
ops = [
torch.where,
lambda cond, x, y: torch.where(cond, x, 3.1415),
lambda cond, x, y: torch.where(cond, 42, y),
]
devices = self.devices
for dtype, op, device in product(self.dtypes, ops, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
cond = self.data_for(torch.bool, device)
x = self.data_for(dtype, device)
y = self.data_for(dtype, device)
fn = apply(op)
ref = fn(cond, x, y)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (cond, x, y))
self.assertEqual(ref, t(cond, x, y))
self.assertAllFused(t.graph_for(cond, x, y))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), op.__name__, device])
) from e
def test_unsupported_dtypes(self):
for device in self.devices:
def fn(x):
return x * x + x
unsupported_dtypes = [
torch.uint8,
torch.complex32,
torch.complex64,
torch.complex128,
torch.qint8,
torch.quint8,
torch.qint32,
]
for dtype in unsupported_dtypes:
try:
x = self.data_for(dtype, device)
ref = fn(x)
except Exception:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
t = torch.jit.trace(fn, (x,))
self.assertEqual(ref, t(x))
self.assertEqual(len(self.findFusionGroups(t.graph_for(x))), 0)
def test_superslomo(self):
devices = self.devices.copy()
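        # TE fusion on CPU requires the LLVM backend; drop cpu when it is not built in.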
if not LLVM_ENABLED:
devices.remove("cpu")
for device in devices:
# Test extracted from Super-SloMo: https://github.com/avinashpaliwal/Super-SloMo
# A few interesting things happen here: strided inputs of mixed size,
# plus outputs of mixed shapes. The latter characteristic happened to
# expose a memory corruption bug due to not properly guarding the
# outputs.
def eager(t0, t1, t2, t3, t4):
t5 = torch.mul(t0, t4)
t6 = torch.mul(t2, t3)
t7 = torch.mul(t6, t1)
t9 = torch.add(t5, t7)
t11 = torch.add(t0, t6)
ft_p = torch.div(t9, t11)
return (ft_p, t11, t9, t6)
t0 = torch.rand(1, 6, 352, 352, device=device).transpose(0, 1)
t1 = torch.rand(6, 3, 352, 352, device=device)
t2 = torch.rand(6, device=device)[None, None, None, :].permute(3, 0, 1, 2)
t3 = torch.rand(6, 1, 352, 352, device=device)
t4 = torch.rand(6, 3, 352, 352, device=device)
inputs = [t0, t1, t2, t3, t4]
script = torch.jit.script(eager)
for _ in range(4):
for pair in zip(script(*inputs), eager(*inputs)):
test, ref = pair
torch.testing.assert_close(test, ref)
self.assertAllFused(
script.graph_for(*inputs), except_for={"prim::TupleConstruct"}
)
def test_sub_gt_and(self):
for device in self.devices:
def eager(t1, t2, t3, t4, t: float):
w = t1 - t2
h = t3 - t4
k = (w > t) & (h > t)
assert k.dtype == torch.bool
if t > 0.5:
# Putting a use of k in a never-executed conditional prevents
# profiling its type, which leaves it as "Tensor". If we
# propagate Tensor back to the definition of k, we have to be
# careful not to create a fusion group containing it.
return k + 1
return w
t = torch.rand(8, dtype=torch.float, device=device)
scripted = self.checkScript(eager, (t, t, t, t, 0.1))
@skipIfTorchDynamo("too slow")
def test_chunk_mul_one(self):
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def eager(x):
z, y, w = torch.chunk(x, 3, -1)
return z * 3, y, w
x = torch.rand(64, 1, 3072, dtype=torch.float, device=device)
z, y, w = eager(x)
script = self.checkScript(eager, (x,))
def test_eq_unsqueeze_type_as(self):
for device in self.devices:
def eager(a, b):
mask = b == 1
mask = torch.unsqueeze(mask, -1)
x = mask.type_as(a)
return x, mask
a = torch.rand(1, 64, 1024, device=device, dtype=torch.float)
b = torch.randint(-2, 2, (1, 64), device=device, dtype=torch.long)
script = self.checkScript(eager, (a, b))
def test_neg_pow(self):
def eager_tt(a: torch.Tensor, b: torch.Tensor):
return torch.neg(torch.pow(a, b))
def eager_ts(a: torch.Tensor, b: float):
return torch.neg(torch.pow(a, b))
def eager_st(a: float, b: torch.Tensor):
return torch.neg(torch.pow(a, b))
a = torch.rand(1, dtype=torch.float)
b = torch.rand(1, dtype=torch.float)
s = b.item()
script = self.checkScript(eager_tt, (a, b))
# TODO: re-enable fusion, which doesn't work right now. just test correctness for now
# self.assertAllFused(script.graph_for(a, b))
script = self.checkScript(eager_ts, (a, s))
# self.assertAllFused(script.graph_for(a, s))
script = self.checkScript(eager_st, (s, b))
# self.assertAllFused(script.graph_for(s, b))
@unittest.skipIf(not LLVM_ENABLED, "Too slow to run with the TE interpreter")
def test_conv2d_depthwise(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
def eager(input, weight, bias):
return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=72)
input = torch.rand((1, 72, 56, 56), dtype=torch.float)
weight = torch.rand((72, 1, 3, 3), dtype=torch.float)
bias = torch.rand((72), dtype=torch.float)
script = self.checkScript(eager, (input, weight, bias))
self.assertAllFused(script.graph_for(input, weight, bias))
def test_conv2d(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
def eager(input, weight, bias):
return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=1)
input = torch.rand((1, 64, 56, 56), dtype=torch.float)
weight = torch.rand((64, 64, 3, 3), dtype=torch.float)
bias = torch.rand((64), dtype=torch.float)
script = self.checkScript(eager, (input, weight, bias))
FileCheck().check_not("TensorExpr").run(
torch.jit.last_executed_optimized_graph()
)
def test_type_as_cat(self):
with inline_fusion_groups():
def eager(x, y):
return torch.cat((x, y.type_as(x)), dim=1)
dtypes = self.dtypes.copy()
# CPU fuser doesn't support float16.
dtypes.remove(torch.float16)
dtypes.remove(torch.bfloat16)
for dtype1, dtype2 in product(dtypes, dtypes):
x = torch.randint(2, (1, 13)).to(dtype1)
zero = torch.tensor([[0]]).to(dtype2)
one = torch.tensor([[1]]).to(dtype2)
script = torch.jit.trace(eager, (x, zero))
for _ in range(3):
torch.testing.assert_close(script(x, zero), eager(x, zero))
torch.testing.assert_close(script(x, one), eager(x, one))
self.assertAllFused(script.graph_for(x, one))
def test_to_device(self):
def eager(x):
return x.to(device="cpu").relu()
x = torch.rand(8)
script = self.checkScript(eager, (x,))
self.assertAllFused(script.graph_for(x))
def test_dims(self):
def eager(x, y):
return x / (y + 0.0001)
x = torch.linspace(-1, 1, 768, dtype=torch.float32).as_strided(
(1, 1, 768), (768, 1, 1)
)
y = torch.tensor([[[2.0]]], dtype=torch.float32)
script = self.checkScript(eager, (x, y))
self.assertAllFused(script.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_channels_last_dims_dynamic(self):
def eager(x, y):
return x + (y + 0.0001)
indices = [0, 1, 2, 3]
sets = []
for i in range(0, len(indices) + 1):
for subset in combinations(indices, i):
sets.append(subset) # noqa: PERF402
for set in sets:
size = [2, 3, 4, 5]
for index in set:
size[index] = 1
inp = torch.rand(size).to(memory_format=torch.channels_last).cuda()
with texpr_enable_strategy([("DYNAMIC", 20)]):
foo_s = torch.jit.trace(eager, (inp, inp))
for _ in range(3):
out = foo_s(inp, inp)
out_eager = eager(inp, inp)
self.assertEqual(out_eager, out)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("TensorExpr").run(g)
def test_exhaust_specializations(self):
with texpr_enable_strategy([("STATIC", 1)]):
@torch.jit.script
def foo(x):
return x + x + x
for _ in range(3):
foo(torch.rand([2, 2]))
for _ in range(3):
foo(torch.rand([4, 4, 4]))
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
FileCheck().check_count("TensorExpr", 2, exactly=True).run(g)
def test_unsqueeze_var_dim(self):
def eager(x, y, z: int):
return x * torch.unsqueeze(y, dim=z)
x = torch.rand(4, 4, 64).permute(1, 0, 2)
y = torch.rand(4, 4)
z = 2
script = self.checkScript(eager, (x, y, z))
def _test_fwd_bwd(self, fn):
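        # Shared forward/backward harness: runs several gradient-descent-style
        # update steps with the eager fn and its scripted version side by side,
        # then checks that the final outputs still match.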
x = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)
xs = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)
script = torch.jit.script(fn)
for i in range(11):
y = fn(x)
g0 = torch.rand_like(y)
y.backward(g0)
ys = script(xs)
ys.backward(g0)
with torch.no_grad():
x -= 0.1 * x.grad
xs -= 0.1 * xs.grad
x.grad = None
xs.grad = None
torch.testing.assert_close(y, ys)
def test_relu_fwd_bwd(self):
def eager(x):
return torch.relu(x * 1.01)
self._test_fwd_bwd(eager)
def test_hardswish_fwd_bwd(self):
def eager(x):
return F.hardswish(x) * 1.01
self._test_fwd_bwd(eager)
def test_hardsigmoid_fwd_bwd(self):
def eager(x):
return F.hardsigmoid(x) * 1.01
self._test_fwd_bwd(eager)
def test_cat_graph_opt(self):
def foo(x, y, z):
return torch.log(torch.cat([x, y, z]))
self.checkScript(
foo, (torch.rand([5, 5]), torch.rand([2, 5]), torch.rand([1, 5]))
)
        # TODO: not sure why the updated graph isn't reflected in last_optimized_graph
self.assertLastGraphAllFused()
def test_dynamic_cat(self):
with inline_fusion_groups():
@torch.jit.script
def repro(
xs: List[torch.Tensor], ys: List[torch.Tensor], zs: List[torch.Tensor]
):
return [
torch.cat([x, torch.cat([y, z], dim=-1)], dim=-1)
for x, y, z in zip(xs, ys, zs)
]
for _ in range(3):
N = 3
xs = [torch.ones(21) for _ in range(N)]
# Note: concat of ys and zs will have the same size for each
# pair, even though the individual ys and zs do not.
ys = [torch.ones(N - i) for i in range(N)]
zs = [torch.ones(i) for i in range(N)]
repro(xs, ys, zs)
def test_scalar_only_inputs(self):
def eager(b: float):
a = torch.ones(1)
return a * b
script = self.checkScript(eager, (1.0,))
def test_cat_2k_args(self):
with inline_fusion_groups():
def eager(x):
return torch.relu(torch.cat([x for _ in range(2000)]))
x = torch.randn(1)
trace = self.checkTrace(eager, (x,))
fusion_groups = self.findFusionGroups(trace.graph_for(x))
self.assertEqual(len(fusion_groups), 0)
def test_adaptive_avg_pool2d(self):
        # TODO: once adaptive_avg_pool2d is available in the OpInfo DB, this
        # test should be moved there
with inline_fusion_groups():
def foo1(x):
return torch.nn.functional.adaptive_avg_pool2d(x, (2, 2))
def foo2(x):
return torch.nn.functional.adaptive_avg_pool2d(x, (2))
x = torch.randn(4, 4, 4)
for foo in [foo1, foo2]:
f = torch.jit.trace(foo, (x,))
kernel = torch._C._te.TensorExprKernel(f.graph)
correct_val = f(x)
self.assertEqual(kernel.run((x,)), correct_val)
def test_unrolled_cat(self):
with inline_fusion_groups():
def eager(x):
ret = torch.empty(0)
for i in range(x.shape[0]):
ret = torch.cat([ret, x[i].relu()])
return ret
script = torch.jit.script(eager)
# Warm up with size=1 tensor; since the loop iterates once the
# profile data will be "burned in" assuming size=1, and then
# unrolled.
x = torch.ones(1, 1)
for _ in range(3):
script(x)
torch.testing.assert_close(eager(x), script(x))
# Now when an input hits the unrolled path, it will produce an
# incorrectly-sized tensor, since size=1 has been burned in.
x = torch.ones((8, 1))
torch.testing.assert_close(eager(x), script(x))
@skipIfTorchDynamo("too slow")
@unittest.skipIf(TEST_WITH_ASAN, "takes 10+ minutes on asan")
@unittest.skipIf(TEST_WITH_ROCM, "Tensor-likes are not close for nans")
def test_batch_norm(self):
def test(fn, args):
trace = torch.jit.trace(fn, args)
self.assertAllFused(trace.graph_for(*args))
            # TODO: Are `NaN`s actually ok here, or did this pass silently before
            # because `equal_nan=True` was the default?
torch.testing.assert_close(fn(*args), trace(*args), equal_nan=True)
def bn(i, x):
return torch.batch_norm(i, x, x, x, x, False, 0.1, 1e-4, False).relu()
def bn_no_weight(i, x):
return torch.batch_norm(i, None, x, x, x, False, 0.1, 1e-4, False).relu()
def bn_no_bias(i, x):
return torch.batch_norm(i, x, None, x, x, False, 0.1, 1e-4, False).relu()
def bn_neither(i, x):
return torch.batch_norm(i, None, None, x, x, False, 0.1, 1e-4, False).relu()
for device in self.devices:
i = torch.randn(4, 16, 32, 40, device=device)
x = torch.randn(16, device=device)
for fn in [bn, bn_no_weight, bn_no_bias, bn_neither]:
test(fn, (i, x))
def test_profiler(self):
@torch.jit.script
def test(x, y, z):
return x * y + z
args = [torch.randn(4) for _ in range(3)]
with torch.autograd.profiler.profile() as prof:
for _ in range(3):
test(*args)
self.assertIn("fused_mul_add", prof.table())
def test_skip_grad_in_check(self):
@torch.jit.script
def foo(x):
return (x + 2) / 2
inp = torch.rand([4, 4])
for _ in range(3):
foo(inp)
inp.requires_grad_(True)
with torch.inference_mode():
for _ in range(3):
foo(inp)
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
torch._C._jit_pass_inline(g)
FileCheck().check_count("prim::If", 1, exactly=True).run(g)
def test_dynamic_shapes(self):
from functools import partial
n = 10
gen_tensor = (
lambda n: R(1, n),
lambda n: R(n, n),
lambda n: R(n, n).transpose(0, 1),
lambda n: R(n + 1, n + 1, 2)[:n, n, 0],
lambda n: R(n, n, 2)[:, :, 0],
lambda n: R(n, n + 1, n + 2, n + 3).to(memory_format=torch.channels_last),
)
with texpr_enable_strategy([("DYNAMIC", 20)]):
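            # With the DYNAMIC fusion strategy the TE fuser is expected to emit a
            # single shape-generic kernel (guarded by TensorExprDynamicGuard)
            # instead of one static specialization per input size.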
def foo(x, y, z):
return torch.sigmoid(torch.tanh(x))
foo.__disable_jit_function_caching__ = True
def fi(x, y, z):
return torch.tanh(x + y)
fi.__disable_jit_function_caching__ = True
def fum(x, y, z):
return torch.tanh(x + y) + z
fum.__disable_jit_function_caching__ = True
funcs = [foo, fi, fum]
with inline_fusion_groups():
for device in self.devices:
I = partial(torch.randint, 0, 100, device=device)
R = partial(torch.randn, device=device)
for i, func in enumerate(funcs):
num_args = i + 1
for j, gen in enumerate(gen_tensor):
inps = (gen(n), gen(n), gen(n))
func_s = torch.jit.trace(func, inps, check_trace=False)
torch._C._jit_pass_erase_shape_information(func_s.graph)
for _ in range(2):
x, y, z = gen(n), gen(n), gen(n)
func_s(x, y, z)
for incr in range(3):
func_s(*[gen(n + 1) for _ in range(3)])
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
torch._C._jit_pass_dce(g)
# We should see only one optimized kernel
FileCheck().check_count(
"TensorExprDynamicGuard", 1, exactly=True
).run(g)
self.assertEqual(func(*inps), func_s(*inps))
gen = gen_tensor[0]
inps = (gen(n), gen(n), gen(n))
foo_s = torch.jit.trace(foo, inps)
torch._C._jit_pass_erase_shape_information(foo_s.graph)
g_prev = None
for gen in gen_tensor:
for i in range(3):
foo_s(*[gen(n + i) for _ in range(3)])
inps = (gen(n), gen(n), gen(n))
self.assertEqual(foo_s(*inps), foo(*inps))
g = torch.jit.last_executed_optimized_graph()
torch._C._jit_pass_inline(g)
torch._C._jit_pass_dce(g)
FileCheck().check_count(
"TensorExprDynamicGuard", len(gen_tensor), exactly=True
).run(g)
@unittest.skipIf(not RUN_CUDA, "half-precision NNC fusion requires CUDA")
def test_autocast_up(self):
def f(x):
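            # _autocast_to_full_precision upcasts the half input to float32; the
            # whole cast + exp sequence is expected to end up in one fusion group.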
y = x._autocast_to_full_precision(True, True)
z = torch.exp(y)
return z
x = torch.rand((2, 2), dtype=torch.half, device="cuda")
scr = torch.jit.script(f)
scr(x)
scr(x)
self.assertLastGraphAllFused()
@unittest.skipIf(not RUN_CUDA, "half-precision NNC fusion requires CUDA")
def test_autocast_down(self):
def f(x):
y = torch.sigmoid(x)
z = y._autocast_to_reduced_precision(True, True, torch.half, torch.half)
return z
x = torch.rand((2, 2), dtype=torch.float, device="cuda")
scr = torch.jit.script(f)
scr(x)
scr(x)
self.assertLastGraphAllFused()
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
def test_to_dtype(self):
def f(x):
y = torch.sigmoid(x)
z = y._autocast_to_reduced_precision(True, True, torch.half, torch.bfloat16)
h = z._autocast_to_full_precision(True, True)
i = h.to(dtype=torch.bfloat16)
j = i.to(dtype=torch.float32)
return j
x = torch.rand((2, 2), dtype=torch.float32)
scr = torch.jit.trace(f, x)
scr(x)
scr(x)
self.assertLastGraphAllFused()
self.assertEqual(f(x), scr(x), atol=4e-3, rtol=4e-3)
bf_x = torch.rand((2, 2), dtype=torch.bfloat16)
bf_scr = torch.jit.trace(f, bf_x)
bf_scr(bf_x)
bf_scr(bf_x)
graph = bf_scr.graph_for(bf_x)
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 2)
self.assertEqual(f(bf_x), bf_scr(bf_x), atol=4e-3, rtol=4e-3)
def test_with_strict_fusion(self):
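        # torch.jit.strict_fusion() raises if ops inside the block are left
        # unfused (and, under autodiff, if they are not symbolically
        # differentiated); the cases below cover the fully fused path, the
        # error messages, and the eager-mode warning.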
def success(x):
with torch.jit.strict_fusion():
return x + x + x
scripted = self.checkScript(success, (torch.rand([4]),))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_not("aten::add").check("prim::TensorExprGroup").run(g)
def foo(x):
with torch.jit.strict_fusion():
return x + x + torch.rand([4]) + 3
with self.assertRaises(Exception) as error_out:
foo_s = torch.jit.script(foo)
foo_s(torch.rand([4]))
foo_s(torch.rand([4]))
print(torch.jit.last_executed_optimized_graph())
fc = FileCheck().check("Found unfused operators")
fc.check("aten::rand(SymInt[] size")
fc.check("torch.rand([4]").run(str(error_out.exception))
with warnings.catch_warnings(record=True) as warns:
foo(torch.rand([4]))
FileCheck().check("Only works in script mode").run(str(warns[0]))
def test_autodiff(x):
with torch.jit.strict_fusion():
return torch.rand([4]) + x + x + x
foo_s = torch.jit.script(test_autodiff)
inp = torch.rand([4], requires_grad=True)
with self.assertRaises(Exception) as error_out:
for _ in range(3):
foo_s(inp)
f = FileCheck().check("unfused operators").check("aten::rand")
f.run(str(error_out.exception))
def test_separate_fusions(x, y):
with torch.jit.strict_fusion():
return x + x + x, y + y + y
inp = torch.rand([4], requires_grad=True)
with self.assertRaises(Exception) as error_out:
for _ in range(3):
foo_s = torch.jit.script(test_separate_fusions)
foo_s(inp, inp)
f = FileCheck().check("Found multiple fusions")
f.run(str(error_out.exception))
def test_constant_chunk_shapes(self):
        # We had an issue where buildShapeExpressions would fail as shown below:
#
# %1 : Tensor = Constant[..] # not supported, we don't build this shape
# %2 : Tensor = Constant[..] # not supported
# %3 : Tensor = aten::add(%1, %2) # inputs not supported, we don't build shape
# ... = prim::ConstantChunk[..](%3) # it forgets to check whether input shapes exist, and fails
if self.dynamic_shapes:
self.skipTest("TODO: chunk dynamic shapes")
for device in self.devices:
def f(x, y):
r = torch.tensor(4)
z1, z2 = (x + y + r).chunk(2, dim=1)
return z1 * z2
x = torch.randn(4, 4, dtype=torch.float, device=device)
y = torch.randn(4, 4, dtype=torch.float, device=device)
ge = self.checkTrace(f, (x, y))
graph = ge.graph_for(x, y)
# make sure that we are actually testing the right scenario
FileCheck().check("with " + FUSION_GROUP + "_").check_count(
"ConstantChunk", 1, exactly=True
).run(str(graph))
f_traced = torch.jit.trace(f, (x, y))
for i in range(4):
# make sure this doesn't error out
res = f_traced(x, y)
self.assertEqual(res, f(x, y))
@unittest.skipIf(not RUN_CUDA_HALF, "half-precision NNC fusion requires CUDA")
def test_pow_multiple_dtype(self):
# https://github.com/pytorch/pytorch/issues/75476
def fn(p: torch.Tensor, gamma: float = 2.0) -> torch.Tensor:
p = torch.sigmoid(p)
result = p**gamma
return result
x = torch.rand((2, 2), dtype=torch.half, device="cuda")
ref = fn(x)
script_fn = torch.jit.script(fn)
for i in range(4):
res = script_fn(x)
self.assertEqual(ref, res)
class TestTEFuserStatic(TestTEFuser):
dynamic_shapes = False
class TestTEFuserDynamic(TestTEFuser):
dynamic_shapes = True
del TestTEFuser
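# OpInfo entries expected to fuse cleanly with the TE fuser (presumably consumed
# by the OpInfo-driven tests later in this file).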
works_list = [
"__radd__",
"__rdiv__",
"__rmul__",
"__rmod__",
"abs",
"acos",
"add",
"addcmul",
"addmm.decomposed",
"asin",
"atan",
"atan2",
"ceil",
"clamp",
"clamp.scalar",
"contiguous",
"cos",
"cosh",
"div.no_rounding_mode",
"div.true_rounding",
"div.floor_rounding",
"div.trunc_rounding",
"eq",
"erf",
"erfc",
"exp",
"expand",
"expand_as",
"expm1",
"floor",
"fmod",
"fmod.autodiffed",
"ge",
"gt",
"isnan",
"le",
"lerp",
"lgamma",
"log",
"log10",
"log1p",
"log2",
"lt",
"masked_fill",
"max.binary",
"mean",
"min.binary",
"mm",
"mul",
"ne",
"neg",
"nn.functional.hardshrink",
"nn.functional.hardsigmoid",
"nn.functional.hardswish",
"nn.functional.softplus",
"nn.functional.hardtanh",
"nn.functional.leaky_relu",
"nn.functional.relu",
"nn.functional.relu6",
"nn.functional.softsign",
"nn.functional.tanhshrink",
"nn.functional.threshold",
"permute",
"pow",
"reciprocal",
"remainder",
"remainder.autodiffed",
"reshape",
"reshape_as",
"round",
"rsub",
"rsub.rsub_tensor",
"rsqrt",
"sigmoid",
"sign",
"sin",
"sinh",
"sqrt",
"sub",
"sum",
"t",
"tan",
"tanh",
"transpose",
"true_divide",
"trunc",
"unsqueeze",
"view",
"view_as",
"where",
"bool",
"byte",
"char",
"double",
"float",
"half",
"int",
"long",
"short",
"bool.channels_last",
"byte.channels_last",
"char.channels_last",
"double.channels_last",
"float.channels_last",
"half.channels_last",
"int.channels_last",
"long.channels_last",
"short.channels_last",
]
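# OpInfo entries currently known not to fuse.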
known_failures = [
"__rmatmul__",
"frac",
"matmul",
]
# If your OpInfo test causes this test to fail, add it here
skip_ops = ["conj"]
|
import operator
import os
import unittest
import contextlib
import math
import torch
import torch.nn.functional as F
from torch.testing import FileCheck
from typing import List
import warnings
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, slowTest, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining, \
clone_inputs, get_traced_sample_variant_pairs, TensorExprTestOptions, NoTracerWarnContextManager
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests, \
OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from textwrap import dedent
from itertools import product, permutations, combinations
from test_jit import backward_graph, get_lstm_inputs, get_milstm_inputs, \
LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
from jit.test_fuser_common import TestFuserCommon # noqa: F401
FUSION_GROUP = 'prim::TensorExprGroup'
LLVM_ENABLED = torch._C._llvm_enabled()
autograd_check_set = {'aten::__is__', 'prim::AutogradAllNonZero', 'prim::AutogradAllZero', 'prim::ListConstruct'}
from functools import partial
|
import contextlib
import math
import operator
import os
import unittest
import warnings
from typing import List
import torch
import torch.nn.functional as F
from torch.testing import FileCheck
from itertools import combinations, permutations, product
from textwrap import dedent
from jit.test_fuser_common import TestFuserCommon # noqa: F401
from test_jit import (
backward_graph,
get_lstm_inputs,
get_milstm_inputs,
LSTMCellC,
LSTMCellF,
LSTMCellS,
MiLSTMCell,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import (
enable_profiling_mode_for_profiling_tests,
GRAPH_EXECUTOR,
IS_FBCODE,
ProfilingMode,
run_tests,
skipIfTorchDynamo,
slowTest,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
)
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing._internal.jit_utils import (
clone_inputs,
get_traced_sample_variant_pairs,
JitTestCase,
NoTracerWarnContextManager,
RUN_CUDA,
RUN_CUDA_HALF,
RUN_CUDA_MULTI_GPU,
set_fusion_group_inlining,
TensorExprTestOptions,
warmup_backward,
)
FUSION_GROUP = "prim::TensorExprGroup"
LLVM_ENABLED = torch._C._llvm_enabled()
autograd_check_set = {
"aten::__is__",
"prim::AutogradAllNonZero",
"prim::AutogradAllZero",
"prim::ListConstruct",
}
from functools import partial
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_jit_fuser_te.py
|
test_matmul
|
def test_matmul(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
def fn(x, y):
return torch.matmul(x, y)
devices = ['cpu'] # No cuda support for ext calls yet
sizes = [[[128, 128], [128, 128]],
[[10, 10], [10, 10]],
[[1, 16], [16, 128]],
[[128], [128]],
[[128], [128, 128]],
[[3], [3]],
[[3, 4], [4]],
[[10, 3, 4], [4]],
[[10, 3, 4], [10, 4, 5]],
[[10, 3, 4], [4, 5]],
]
# Only 2D x 2D matrix multiply is supported. For non-supported sizes we
# still want to run results verification to test that we didn't
# accidentally fuse it, but we skip the 'is-fused' check.
# TODO: add support for other shape combinations and make this set empty:
skip_is_fused_check_sizes = ["[[128], [128]]",
"[[128], [128, 128]]",
"[[3], [3]]",
"[[3, 4], [4]]",
"[[10, 3, 4], [4]]",
"[[10, 3, 4], [10, 4, 5]]",
"[[10, 3, 4], [4, 5]]",
]
for dtype, size, device in product(self.dtypes, sizes, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
size_x, size_y = size
x = self.data_for(dtype, device, size=size_x)
y = self.data_for(dtype, device, size=size_y)
ref = fn(x, y)
except Exception as e:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y))
t(x, y)
self.assertEqual(ref, t(x, y))
if not str(size) in skip_is_fused_check_sizes:
self.assertAllFused(t.graph_for(x, y))
except Exception as e:
raise RuntimeError(
" ".join(["Failed:", str(dtype), device])
) from e
|
def test_matmul(self):
if self.dynamic_shapes:
self.skipTest("don't run conv with dynamic shapes")
def fn(x, y):
return torch.matmul(x, y)
devices = ["cpu"] # No cuda support for ext calls yet
sizes = [
[[128, 128], [128, 128]],
[[10, 10], [10, 10]],
[[1, 16], [16, 128]],
[[128], [128]],
[[128], [128, 128]],
[[3], [3]],
[[3, 4], [4]],
[[10, 3, 4], [4]],
[[10, 3, 4], [10, 4, 5]],
[[10, 3, 4], [4, 5]],
]
# Only 2D x 2D matrix multiply is supported. For non-supported sizes we
# still want to run results verification to test that we didn't
# accidentally fuse it, but we skip the 'is-fused' check.
# TODO: add support for other shape combinations and make this set empty:
skip_is_fused_check_sizes = [
"[[128], [128]]",
"[[128], [128, 128]]",
"[[3], [3]]",
"[[3, 4], [4]]",
"[[10, 3, 4], [4]]",
"[[10, 3, 4], [10, 4, 5]]",
"[[10, 3, 4], [4, 5]]",
]
for dtype, size, device in product(self.dtypes, sizes, devices):
if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
continue
try:
size_x, size_y = size
x = self.data_for(dtype, device, size=size_x)
y = self.data_for(dtype, device, size=size_y)
ref = fn(x, y)
except Exception as e:
# If eager mode doesn't support a dtype/op/device combo,
# neither does the fuser. Catch everything to avoid needing to
# guess what errors might be thrown by eager.
continue
try:
t = torch.jit.trace(fn, (x, y))
t(x, y)
self.assertEqual(ref, t(x, y))
if str(size) not in skip_is_fused_check_sizes:
self.assertAllFused(t.graph_for(x, y))
except Exception as e:
raise RuntimeError(" ".join(["Failed:", str(dtype), device])) from e
|
import operator
import os
import unittest
import contextlib
import math
import torch
import torch.nn.functional as F
from torch.testing import FileCheck
from typing import List
import warnings
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, slowTest, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining, \
clone_inputs, get_traced_sample_variant_pairs, TensorExprTestOptions, NoTracerWarnContextManager
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests, \
OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from textwrap import dedent
from itertools import product, permutations, combinations
from test_jit import backward_graph, get_lstm_inputs, get_milstm_inputs, \
LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
from jit.test_fuser_common import TestFuserCommon # noqa: F401
FUSION_GROUP = 'prim::TensorExprGroup'
LLVM_ENABLED = torch._C._llvm_enabled()
autograd_check_set = {'aten::__is__', 'prim::AutogradAllNonZero', 'prim::AutogradAllZero', 'prim::ListConstruct'}
@skipIfTorchDynamo()
class TestTEFuser(JitTestCase):
from functools import partial
|
import contextlib
import math
import operator
import os
import unittest
import warnings
from typing import List
import torch
import torch.nn.functional as F
from torch.testing import FileCheck
from itertools import combinations, permutations, product
from textwrap import dedent
from jit.test_fuser_common import TestFuserCommon # noqa: F401
from test_jit import (
backward_graph,
get_lstm_inputs,
get_milstm_inputs,
LSTMCellC,
LSTMCellF,
LSTMCellS,
MiLSTMCell,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import (
enable_profiling_mode_for_profiling_tests,
GRAPH_EXECUTOR,
IS_FBCODE,
ProfilingMode,
run_tests,
skipIfTorchDynamo,
slowTest,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
)
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing._internal.jit_utils import (
clone_inputs,
get_traced_sample_variant_pairs,
JitTestCase,
NoTracerWarnContextManager,
RUN_CUDA,
RUN_CUDA_HALF,
RUN_CUDA_MULTI_GPU,
set_fusion_group_inlining,
TensorExprTestOptions,
warmup_backward,
)
FUSION_GROUP = "prim::TensorExprGroup"
LLVM_ENABLED = torch._C._llvm_enabled()
autograd_check_set = {
"aten::__is__",
"prim::AutogradAllNonZero",
"prim::AutogradAllZero",
"prim::ListConstruct",
}
class TestTEFuser(JitTestCase):
from functools import partial
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
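The record above traces small binary ops, compares the traced output against the eager reference, and then inspects the optimized graph for fusion nodes. A minimal standalone sketch of that trace-compare-inspect loop follows; the function names are illustrative, and whether a prim::TensorExprGroup node actually appears depends on the build and the active fuser.

import torch

def _fused_add_mul(x, y):
    # elementwise chain that a pointwise fuser can fuse on CPU or CUDA
    return (x + y) * 2.0

def check_trace_matches_eager():
    x = torch.randn(4, 4)
    y = torch.randn(4, 4)
    ref = _fused_add_mul(x, y)                 # eager reference
    traced = torch.jit.trace(_fused_add_mul, (x, y))
    traced(x, y)                               # warm-up runs let the profiling
    traced(x, y)                               # executor specialize and fuse
    torch.testing.assert_close(traced(x, y), ref)
    # Inspect the optimized graph; fusion nodes (e.g. prim::TensorExprGroup)
    # show up here only when the corresponding fuser is enabled.
    print(traced.graph_for(x, y))

if __name__ == "__main__":
    check_trace_matches_eager()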
torch
|
test/test_jit_llga_fuser.py
|
is_avx512_supported
|
def is_avx512_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "avx512" in lines
IS_AVX512_UNSUPPORTED = not is_avx512_supported()
LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup'
LLGA_NOT_ENABLED = not torch._C.has_mkldnn or IS_WINDOWS or IS_MACOS
|
def is_avx512_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "avx512" in lines
IS_AVX512_UNSUPPORTED = not is_avx512_supported()
LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup'
LLGA_NOT_ENABLED = not torch.backends.mkldnn.is_available() or IS_WINDOWS or IS_MACOS
|
import sys
import torch
import unittest
import itertools
import torch.nn as nn
from functools import wraps
from concurrent import futures
import torch.nn.functional as F
import torch.fx.experimental.optimization as optimization
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY, IS_WINDOWS, IS_MACOS
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
dtypes
)
import torchvision
import torch._dynamo
|
import sys
import torch
import unittest
import itertools
import torch.nn as nn
from functools import wraps
from concurrent import futures
import torch.nn.functional as F
import torch.fx.experimental.optimization as optimization
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY, IS_WINDOWS, IS_MACOS
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
dtypes
)
import torchvision
import torch._dynamo
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
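The diff in this record swaps the private torch._C.has_mkldnn flag for the public torch.backends.mkldnn.is_available() query when deciding whether the oneDNN (LLGA) tests can run. A minimal sketch of gating a oneDNN-dependent test on that public check is shown below; the class and constant names are mine, not part of the test suite.

import sys
import unittest

import torch

# Public availability query used by the updated guard above; the private
# torch._C.has_mkldnn flag it replaces is an implementation detail.
ONEDNN_AVAILABLE = torch.backends.mkldnn.is_available()
SKIP_ONEDNN = not ONEDNN_AVAILABLE or sys.platform in ("win32", "darwin")

class OneDnnGatedTest(unittest.TestCase):
    @unittest.skipIf(SKIP_ONEDNN, "oneDNN (MKL-DNN) backend not available")
    def test_mkldnn_tensor_roundtrip(self):
        x = torch.randn(2, 3)
        # to_mkldnn()/to_dense() only work when the backend is compiled in
        self.assertTrue(torch.allclose(x.to_mkldnn().to_dense(), x))

if __name__ == "__main__":
    unittest.main()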
torch
|
test/test_jit_llga_fuser.py
|
get_eltwise_fn
|
def get_eltwise_fn(name):
if hasattr(torch, name):
return getattr(torch, name)
elif hasattr(F, name):
return getattr(F, name)
elif name == 'hardswish_':
return torch.nn.Hardswish(inplace=True)
else:
raise NameError('Eltwise function %s not found' % name)
@unittest.skipIf(IS_AVX512_UNSUPPORTED, "This test fails for BF16 on machines without AVX512.")
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestOp(JitLlgaTestCase):
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d(self, dtype):
for [spatial, in_channels, out_channels, kernel, padding, stride, dilation, g, bias] in itertools.product(
[7, 8],
[8, 15],
[7, 16],
[3, 4],
[0, 2],
[1, 2],
[1, 2],
[1, 2],
[True, False]):
m = nn.Conv2d(in_channels=in_channels * g,
out_channels=out_channels * g,
kernel_size=kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=g,
bias=bias)
x = torch.rand(1, in_channels * g, spatial, spatial)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_bn2d(self, dtype):
m = nn.BatchNorm2d(32).eval()
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
            # single-op partition shouldn't be created for batch_norm
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_eltwise(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.eltwise = eltwise_fn
def forward(self, x):
return self.eltwise(x)
for eltwise in ['relu', 'gelu']:
eltwise_fn = get_eltwise_fn(eltwise)
m = M(eltwise_fn)
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
# single-op partition shouldn't be created.
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_max_pool2d(self, dtype):
for [spatial, kernel, padding, stride, dilation, ceil_mode] in itertools.product(
[15, 16, 17, 18, 19],
[4, 5],
[0, 1, 2],
[1, 2], # [1, 2, 4], TODO: fix issue in pad calculation
[1], # [1, 2], TODO: backend support for dilation
[True, False]):
m = nn.MaxPool2d(kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode)
x = torch.rand(1, 4, spatial, spatial)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_avg_pool2d(self, dtype):
for [spatial, kernel, padding, stride, ceil_mode, count_include_pad] in itertools.product(
[15, 16, 17, 18, 19],
[4, 5],
[0, 1, 2],
[1, 2, 4],
[False], # TODO: oneDNN Graph does not fully support ceil_mode=True
[True, False]):
m = nn.AvgPool2d(kernel_size=kernel,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad)
x = torch.rand(1, 4, spatial, spatial)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_variable_kernel_avg_pool2d(self, dtype):
class M(nn.Module):
def forward(self, x):
x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0, count_include_pad=False)
return x
x = torch.randn(1, 1000, 1, 1)
m = M()
_, graph = self.checkTrace(m, [x], dtype)
# kernel_size is not Constant, shouldn't have any LLGA_FUSION_GROUP
# TODO: with shape specialization, should have 1 LLGA_FUSION_GROUP
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_softmax(self, dtype):
for dim in [-4, -3, -2, -1, 0, 1, 2, 3]:
m = nn.Softmax(dim=dim)
x = torch.rand(8, 12, 12, 12)
_, graph = self.checkTrace(m, [x], dtype)
# single-op partition shouldn't be created for softmax
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_linear(self, dtype):
for bias in [True, False]:
x = torch.rand(32, 28)
m = torch.nn.Linear(in_features=28, out_features=64, bias=bias)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::linear'])
def _gen_binary_inputs(self, gen_permute=True):
for xshape, yshape in [
[[1, 32, 28, 28], [1, 32, 28, 28]],
[[1, 32, 28, 28], [1, 1, 28, 28]],
[[1, 32, 28, 28], [28]],
[[1, 32, 28, 28], [1]],
]:
yield torch.rand(xshape), torch.rand(yshape)
if gen_permute and xshape != yshape:
yield torch.rand(yshape), torch.rand(xshape)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_add(self, dtype):
def forward_add(x, y):
return torch.add(x, y, alpha=2)
for x, y in self._gen_binary_inputs():
_, graph = self.checkTrace(forward_add, [x, y], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_add_scalar(self, dtype):
def add_scalar(x):
return 42 + x + 3.14
x = torch.rand(32, 32)
_, graph = self.checkTrace(add_scalar, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_addmm(self, dtype):
# Just a sidenote - comparison of eager-mode & oneDNN Graph JIT outputs of
# addmm (which entails matmul-bias-add fusion) might require higher tolerance
# bounds for BF16. This is subject to change in the near future.
def addmm(x, y, z):
# alpha and beta are 1, by default
return torch.addmm(z, x, y)
x = torch.rand(64, 32)
y = torch.rand(32, 32)
z = torch.rand(64, 32)
_, graph = self.checkTrace(addmm, [x, y, z], dtype)
# single-op partition should be created for matmul with bias.
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_mul(self, dtype):
def forward_mul(x, y):
return torch.mul(x, y) * 3
for x, y in self._gen_binary_inputs():
_, graph = self.checkTrace(forward_mul, [x, y], dtype)
# single-op partitions shouldn't be created
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_identity_binary(self, dtype):
def forward(x):
return x * 1 + 0.0
x = torch.rand(32)
_, graph = self.checkTrace(forward, [x], dtype)
self.assertFused(graph, ['aten::add', 'aten::mul'])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_layer_norm(self, dtype):
# TODO: support more normalized_shape
m = torch.nn.LayerNorm(10)
x = torch.randn(2, 5, 10, 10)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_cat(self, dtype):
def cat_along_dim(d):
def forward_cat(*inputs):
return torch.cat(inputs, d)
return forward_cat
for xshape in [
[8, 8, 8, 8],
[64, 8, 32],
[2048, 64],
]:
for d in range(len(xshape)):
x = torch.rand(xshape)
_, graph = self.checkTrace(cat_along_dim(d), [x, x, x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_typecheck(self, dtype):
x = torch.rand(32, 28, dtype=dtype)
m = torch.nn.Linear(in_features=28, out_features=64, bias=True, dtype=dtype)
traced, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::linear'])
            # change the shape of the input; we should enter the fallback graph
x = torch.rand(5, 28, dtype=dtype)
self.assertEqual(m(x), traced(x))
@unittest.skipIf(IS_AVX512_UNSUPPORTED, "This test fails for BF16 on machines without AVX512.")
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestFusionPattern(JitLlgaTestCase):
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_eltwise(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=False)
self.eltwise = eltwise_fn
def forward(self, x):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
x = self.eltwise(x)
return x
for eltwise in ['relu', 'leaky_relu', 'sigmoid', 'square',
'abs', 'exp', 'hardswish', 'tanh', 'hardtanh']:
for inplace in [True, False]:
eltwise_fn_name = eltwise + '_' if inplace else eltwise
eltwise_fn = get_eltwise_fn(eltwise_fn_name)
m = M(eltwise_fn)
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype=dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                    # test if relu_ is replaced with relu by the mutation removal pass
self.assertFused(graph, ['aten::' + eltwise_fn_name])
# test if relu is fused into the fusion group
self.assertFused(graph, ['aten::' + eltwise])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_silu(self, dtype):
class M(nn.Module):
def __init__(self, inplace):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = nn.SiLU(inplace=inplace)
def forward(self, x):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
return x
for inplace in [False, True]:
for memory_format in [torch.contiguous_format, torch.channels_last]:
m = M(inplace)
x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                # oneDNN Graph does not have a silu op, so the bridge decomposes silu into sigmoid and mul.
                # The in-place op becomes an out-of-place op in the JIT graph.
patterns = [
["aten::_convolution", 'aten::sigmoid', 'aten::mul'],
["aten::_convolution"]
]
silu_op = 'aten::silu_' if inplace else 'aten::silu'
self.assertFused(graph, ['aten::_convolution', silu_op])
self.checkPatterns(graph, patterns)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_ensure_tensor_is_rewrapped(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv4 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = eltwise_fn
self.adaptive_avg_pool_2d = nn.AdaptiveAvgPool2d((5, 7))
def forward(self, x, y):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
x = self.eltwise(x)
y = self.conv3(y)
y = self.eltwise(y)
y = self.conv4(y)
y = self.eltwise(y)
x = torch.add(x, y)
x = self.adaptive_avg_pool_2d(x)
return x
eltwise_fn_name = 'relu'
eltwise_fn = get_eltwise_fn(eltwise_fn_name)
m = M(eltwise_fn)
m = m.to(memory_format=torch.channels_last)
x = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
y = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
# Simply test if the output is accurate
# The output of the second partition is input to adaptive_avg_pool2d, which is
# unsupported by LLGA. In resnext101 32x16d, we encountered an accuracy issue.
_, graph = self.checkTrace(m, [x, y], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 4)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_clamp(self, dtype):
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv4 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv5 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
def forward(self, x):
x = self.conv1(x)
x = torch.clamp(x, min=float('-inf'))
x = self.conv2(x)
x = torch.clamp(x, min=-5)
x = self.conv3(x)
x = torch.clamp(x, min=0, max=float('inf'))
x = self.conv4(x)
x = torch.clamp(x, min=1, max=5)
x = self.conv5(x)
x = torch.clamp(x, max=2)
return x
for inplace in [False, True]:
for memory_format in [torch.contiguous_format, torch.channels_last]:
x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
m = M()
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 5)
self.assertFused(graph, ['aten::_convolution', "aten::clamp"])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_bn(self, dtype):
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.bn1 = nn.BatchNorm2d(32)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
return x
m = M().eval()
if dtype == torch.bfloat16:
m = optimization.fuse(m)
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm'])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_bn_relu(self, dtype):
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.bn1 = nn.BatchNorm2d(32)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
return x
m = M().eval()
if dtype == torch.bfloat16:
m = optimization.fuse(m)
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
'aten::relu'])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_bn2d_eltwise(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.eltwise = eltwise_fn
self.bn = nn.BatchNorm2d(32)
def forward(self, x):
x = self.bn(x)
x = self.eltwise(x)
return x
for eltwise in ['relu']:
eltwise_fn = get_eltwise_fn(eltwise)
m = M(eltwise_fn).eval()
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::' + eltwise])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_linear_eltwise(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn, bias):
super().__init__()
self.linear = nn.Linear(28, 64, bias)
self.eltwise = eltwise_fn
def forward(self, x):
x = self.linear(x)
x = self.eltwise(x)
return x
for [has_bias, eltwise] in itertools.product(
[True, False],
['relu', 'gelu', 'sigmoid', 'hardtanh', 'relu6', 'elu']):
eltwise_fn = get_eltwise_fn(eltwise)
m = M(eltwise_fn, has_bias)
x = torch.rand(32, 28, requires_grad=False)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::' + eltwise])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_sum(self, dtype):
class M(nn.Module):
def __init__(self, bias=False):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
self.bn2 = nn.BatchNorm2d(32)
self.relu = nn.ReLU()
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
self.bn3 = nn.BatchNorm2d(32)
def forward(self, x, y):
x = self.conv1(x)
x = self.bn1(x)
y = self.conv2(y)
y = self.bn2(y)
z = self.relu(x + y)
z = self.conv3(z)
z = self.bn3(z)
return z
for bias in [True, False]:
m = M(bias).eval()
if dtype == torch.bfloat16:
m = optimization.fuse(m)
x = torch.rand(1, 32, 16, 16, requires_grad=False)
y = torch.rand(1, 32, 16, 16, requires_grad=False)
_, graph = self.checkTrace(m, [x, y], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_wildcard(self, dtype):
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
y = self.eltwise(x)
return [x, y]
            # The pattern is as follows:
# conv
# | \
# eltwise \
# | \
# ListConstruct
#
# The output of conv is used by a wildcard op: ListConstruct.
# Thus conv-eltwise cannot be selected into the same Partition.
m = M()
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
# conv can exist in a single-op oneDNN Graph partition but not relu
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::_convolution'])
@onlyCPU
@dtypes(torch.int32)
def test_wildcard_unsupported_dtype(self, dtype):
class M(nn.Module):
def forward(self, x):
y = x // 2
return y
            # In shufflenet_v2_x1_0, channels_per_group is computed as:
# channels_per_group = num_channels // groups
# JIT IR converts groups to Long dtype, which is unsupported
# by oneDNN Graph, viz. Long(requires_grad=0, device=cpu) = prim::Constant[value={2}]()
# This test just ensures that the bridge code can handle
# unsupported dtypes for inputs to ops unsupported
# by oneDNN Graph. In this particular UT, aten::floor_divide
# would be added as a wildcard in graph-construction stage.
m = M()
x = torch.tensor([32], dtype=dtype)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_rewrap_tensor_input_to_pytorch(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = eltwise_fn
self.adaptive_avg_pool_2d = nn.AdaptiveAvgPool2d((5, 7))
def forward(self, x, y):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
x = self.eltwise(x)
x = torch.add(x, y)
x = self.adaptive_avg_pool_2d(x)
return x
eltwise_fn_name = 'relu'
eltwise_fn = get_eltwise_fn(eltwise_fn_name)
m = M(eltwise_fn)
m = m.to(memory_format=torch.channels_last)
x = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
y = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
# Simply test if the output is accurate
# The output of the second partition is input to adaptive_avg_pool2d, which is
# unsupported by LLGA, so it must be handled by PyTorch, which should receive
# correct strides info of the channels-last tensor.
graph, _ = self.checkTrace(m, [x, y], dtype)
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestEnableDisableLlgaFuser(JitTestCase):
def setUp(self):
super().setUp()
self.is_enabled = torch._C._jit_set_llga_enabled(False)
def tearDown(self):
torch._C._jit_set_llga_enabled(self.is_enabled)
super().tearDown()
def test_context_manager(self):
x = torch.randn(4, 8)
y = torch.randn(4, 8)
with torch.jit.fuser('fuser3'):
with torch.jit.fuser('fuser3'):
def t1(x, y):
o = x + y
o = o + 2.0
return o
t_jit = torch.jit.script(t1)
t_jit(x, y)
t_jit(x, y)
self.assertGraphContains(t_jit.graph_for(x, y), LLGA_FUSION_GROUP)
def t2(x, y):
o = x + y
o = o + 3.0
return o
t_jit_2 = torch.jit.script(t2)
t_jit_2(x, y)
t_jit_2(x, y)
self.assertGraphContains(t_jit_2.graph_for(x, y), LLGA_FUSION_GROUP)
def t3(x, y):
o = x + y
o = o + 4.0
return o
t_jit_3 = torch.jit.script(t3)
t_jit_3(x, y)
t_jit_3(x, y)
self.assertGraphContainsExactly(t_jit_3.graph_for(x, y), LLGA_FUSION_GROUP, 0)
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
@unittest.skip("Enable when integration with dynamo aot_autograd is more stable")
class TestDynamoAOT(JitTestCase):
def test_dynamo_aot_ts_onednn(self):
class Seq(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(10, 10),
nn.ReLU(),
nn.Linear(10, 10),
nn.ReLU(),
)
def forward(self, x):
return self.layers(x)
mod = Seq()
import torch._dynamo
aot_mod = torch._dynamo.optimize("aot_ts", nopython=True)(mod)
for _ in range(10):
with torch.jit.fuser("fuser3"):
loss = aot_mod(torch.rand([10, 10])).sum()
loss.backward()
torch._dynamo.reset()
@unittest.skipIf(IS_AVX512_UNSUPPORTED, "This test fails for BF16 on machines without AVX512.")
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestModel(JitLlgaTestCase):
@skipIfNoTorchVision
def _test_vision(self, model_name, dtype):
m = getattr(torchvision.models, model_name)().eval()
if dtype == torch.bfloat16:
m = optimization.fuse(m)
x = torch.rand(1, 3, 224, 224) / 10
_, graph = self.checkTrace(m, [x], dtype)
self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
'aten::relu', 'aten::linear',
'aten::avg_pool2d', 'aten::max_pool2d'])
for model_name, enabled in [
['resnet50', True],
['resnext50_32x4d', True],
['resnext101_32x8d', True],
['densenet121', True],
['densenet161', True],
['densenet169', True],
['densenet201', True],
['efficientnet_b0', True],
['efficientnet_b1', True],
['efficientnet_b2', True],
['efficientnet_b3', True],
['efficientnet_b4', True],
['efficientnet_b5', True],
['efficientnet_b6', True],
['efficientnet_b7', True],
['regnet_y_400mf', True],
['googlenet', TEST_SCIPY],
['mobilenet_v2', True],
['mobilenet_v3_large', True],
['mnasnet1_0', True],
['squeezenet1_0', True],
['vgg16', True],
['alexnet', True],
['shufflenet_v2_x1_0', True],
['wide_resnet50_2', True],
]:
def _wrapper(mname, dtype):
@unittest.skipIf(not enabled, 'Disabled')
@separate_process
def test(self, dtype=dtype):
return self._test_vision(mname, dtype)
return test
for dtype in [torch.bfloat16, torch.float32]:
setattr(TestModel, 'test_vision_%s_%s' % (model_name, str(dtype).split("torch.")[1]), _wrapper(model_name, dtype))
instantiate_device_type_tests(TestFusionPattern, globals())
instantiate_device_type_tests(TestOp, globals())
if __name__ == '__main__':
run_tests()
|
def get_eltwise_fn(name):
if hasattr(torch, name):
return getattr(torch, name)
elif hasattr(F, name):
return getattr(F, name)
elif name == 'hardswish_':
return torch.nn.Hardswish(inplace=True)
else:
raise NameError(f'Eltwise function {name} not found')
@unittest.skipIf(IS_AVX512_UNSUPPORTED, "This test fails for BF16 on machines without AVX512.")
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestOp(JitLlgaTestCase):
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d(self, dtype):
for [spatial, in_channels, out_channels, kernel, padding, stride, dilation, g, bias] in itertools.product(
[7, 8],
[8, 15],
[7, 16],
[3, 4],
[0, 2],
[1, 2],
[1, 2],
[1, 2],
[True, False]):
m = nn.Conv2d(in_channels=in_channels * g,
out_channels=out_channels * g,
kernel_size=kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=g,
bias=bias)
x = torch.rand(1, in_channels * g, spatial, spatial)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_bn2d(self, dtype):
m = nn.BatchNorm2d(32).eval()
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
            # single-op partition shouldn't be created for batch_norm
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_eltwise(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.eltwise = eltwise_fn
def forward(self, x):
return self.eltwise(x)
for eltwise in ['relu', 'gelu']:
eltwise_fn = get_eltwise_fn(eltwise)
m = M(eltwise_fn)
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
# single-op partition shouldn't be created.
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_max_pool2d(self, dtype):
for [spatial, kernel, padding, stride, dilation, ceil_mode] in itertools.product(
[15, 16, 17, 18, 19],
[4, 5],
[0, 1, 2],
[1, 2], # [1, 2, 4], TODO: fix issue in pad calculation
[1], # [1, 2], TODO: backend support for dilation
[True, False]):
m = nn.MaxPool2d(kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode)
x = torch.rand(1, 4, spatial, spatial)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_avg_pool2d(self, dtype):
for [spatial, kernel, padding, stride, ceil_mode, count_include_pad] in itertools.product(
[15, 16, 17, 18, 19],
[4, 5],
[0, 1, 2],
[1, 2, 4],
[False], # TODO: oneDNN Graph does not fully support ceil_mode=True
[True, False]):
m = nn.AvgPool2d(kernel_size=kernel,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad)
x = torch.rand(1, 4, spatial, spatial)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_variable_kernel_avg_pool2d(self, dtype):
class M(nn.Module):
def forward(self, x):
x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0, count_include_pad=False)
return x
x = torch.randn(1, 1000, 1, 1)
m = M()
_, graph = self.checkTrace(m, [x], dtype)
# kernel_size is not Constant, shouldn't have any LLGA_FUSION_GROUP
# TODO: with shape specialization, should have 1 LLGA_FUSION_GROUP
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_softmax(self, dtype):
for dim in [-4, -3, -2, -1, 0, 1, 2, 3]:
m = nn.Softmax(dim=dim)
x = torch.rand(8, 12, 12, 12)
_, graph = self.checkTrace(m, [x], dtype)
# single-op partition shouldn't be created for softmax
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_linear(self, dtype):
for bias in [True, False]:
x = torch.rand(32, 28)
m = torch.nn.Linear(in_features=28, out_features=64, bias=bias)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::linear'])
def _gen_binary_inputs(self, gen_permute=True):
for xshape, yshape in [
[[1, 32, 28, 28], [1, 32, 28, 28]],
[[1, 32, 28, 28], [1, 1, 28, 28]],
[[1, 32, 28, 28], [28]],
[[1, 32, 28, 28], [1]],
]:
yield torch.rand(xshape), torch.rand(yshape)
if gen_permute and xshape != yshape:
yield torch.rand(yshape), torch.rand(xshape)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_add(self, dtype):
def forward_add(x, y):
return torch.add(x, y, alpha=2)
for x, y in self._gen_binary_inputs():
_, graph = self.checkTrace(forward_add, [x, y], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_add_scalar(self, dtype):
def add_scalar(x):
return 42 + x + 3.14
x = torch.rand(32, 32)
_, graph = self.checkTrace(add_scalar, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_addmm(self, dtype):
# Just a sidenote - comparison of eager-mode & oneDNN Graph JIT outputs of
# addmm (which entails matmul-bias-add fusion) might require higher tolerance
# bounds for BF16. This is subject to change in the near future.
def addmm(x, y, z):
# alpha and beta are 1, by default
return torch.addmm(z, x, y)
x = torch.rand(64, 32)
y = torch.rand(32, 32)
z = torch.rand(64, 32)
_, graph = self.checkTrace(addmm, [x, y, z], dtype)
# single-op partition should be created for matmul with bias.
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_mul(self, dtype):
def forward_mul(x, y):
return torch.mul(x, y) * 3
for x, y in self._gen_binary_inputs():
_, graph = self.checkTrace(forward_mul, [x, y], dtype)
# single-op partitions shouldn't be created
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_identity_binary(self, dtype):
def forward(x):
return x * 1 + 0.0
x = torch.rand(32)
_, graph = self.checkTrace(forward, [x], dtype)
self.assertFused(graph, ['aten::add', 'aten::mul'])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_layer_norm(self, dtype):
# TODO: support more normalized_shape
m = torch.nn.LayerNorm(10)
x = torch.randn(2, 5, 10, 10)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_cat(self, dtype):
def cat_along_dim(d):
def forward_cat(*inputs):
return torch.cat(inputs, d)
return forward_cat
for xshape in [
[8, 8, 8, 8],
[64, 8, 32],
[2048, 64],
]:
for d in range(len(xshape)):
x = torch.rand(xshape)
_, graph = self.checkTrace(cat_along_dim(d), [x, x, x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_typecheck(self, dtype):
x = torch.rand(32, 28, dtype=dtype)
m = torch.nn.Linear(in_features=28, out_features=64, bias=True, dtype=dtype)
traced, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::linear'])
            # change the shape of the input; we should enter the fallback graph
x = torch.rand(5, 28, dtype=dtype)
self.assertEqual(m(x), traced(x))
@unittest.skipIf(IS_AVX512_UNSUPPORTED, "This test fails for BF16 on machines without AVX512.")
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestFusionPattern(JitLlgaTestCase):
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_eltwise(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=False)
self.eltwise = eltwise_fn
def forward(self, x):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
x = self.eltwise(x)
return x
for eltwise in ['relu', 'leaky_relu', 'sigmoid', 'square',
'abs', 'exp', 'hardswish', 'tanh', 'hardtanh']:
for inplace in [True, False]:
eltwise_fn_name = eltwise + '_' if inplace else eltwise
eltwise_fn = get_eltwise_fn(eltwise_fn_name)
m = M(eltwise_fn)
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype=dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                    # test if relu_ is replaced with relu by the mutation removal pass
self.assertFused(graph, ['aten::' + eltwise_fn_name])
# test if relu is fused into the fusion group
self.assertFused(graph, ['aten::' + eltwise])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_silu(self, dtype):
class M(nn.Module):
def __init__(self, inplace):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = nn.SiLU(inplace=inplace)
def forward(self, x):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
return x
for inplace in [False, True]:
for memory_format in [torch.contiguous_format, torch.channels_last]:
m = M(inplace)
x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                # oneDNN Graph does not have a silu op, so the bridge decomposes silu into sigmoid and mul.
                # The in-place op becomes an out-of-place op in the JIT graph.
patterns = [
["aten::_convolution", 'aten::sigmoid', 'aten::mul'],
["aten::_convolution"]
]
silu_op = 'aten::silu_' if inplace else 'aten::silu'
self.assertFused(graph, ['aten::_convolution', silu_op])
self.checkPatterns(graph, patterns)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_ensure_tensor_is_rewrapped(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv4 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = eltwise_fn
self.adaptive_avg_pool_2d = nn.AdaptiveAvgPool2d((5, 7))
def forward(self, x, y):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
x = self.eltwise(x)
y = self.conv3(y)
y = self.eltwise(y)
y = self.conv4(y)
y = self.eltwise(y)
x = torch.add(x, y)
x = self.adaptive_avg_pool_2d(x)
return x
eltwise_fn_name = 'relu'
eltwise_fn = get_eltwise_fn(eltwise_fn_name)
m = M(eltwise_fn)
m = m.to(memory_format=torch.channels_last)
x = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
y = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
# Simply test if the output is accurate
# The output of the second partition is input to adaptive_avg_pool2d, which is
# unsupported by LLGA. In resnext101 32x16d, we encountered an accuracy issue.
_, graph = self.checkTrace(m, [x, y], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 4)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_clamp(self, dtype):
class M(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv4 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv5 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
def forward(self, x):
x = self.conv1(x)
x = torch.clamp(x, min=float('-inf'))
x = self.conv2(x)
x = torch.clamp(x, min=-5)
x = self.conv3(x)
x = torch.clamp(x, min=0, max=float('inf'))
x = self.conv4(x)
x = torch.clamp(x, min=1, max=5)
x = self.conv5(x)
x = torch.clamp(x, max=2)
return x
for inplace in [False, True]:
for memory_format in [torch.contiguous_format, torch.channels_last]:
x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
m = M()
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 5)
self.assertFused(graph, ['aten::_convolution', "aten::clamp"])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_bn(self, dtype):
class M(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.bn1 = nn.BatchNorm2d(32)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
return x
m = M().eval()
if dtype == torch.bfloat16:
m = optimization.fuse(m)
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm'])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_bn_relu(self, dtype):
class M(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.bn1 = nn.BatchNorm2d(32)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
return x
m = M().eval()
if dtype == torch.bfloat16:
m = optimization.fuse(m)
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
'aten::relu'])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_bn2d_eltwise(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.eltwise = eltwise_fn
self.bn = nn.BatchNorm2d(32)
def forward(self, x):
x = self.bn(x)
x = self.eltwise(x)
return x
for eltwise in ['relu']:
eltwise_fn = get_eltwise_fn(eltwise)
m = M(eltwise_fn).eval()
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::' + eltwise])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_linear_eltwise(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn, bias):
super().__init__()
self.linear = nn.Linear(28, 64, bias)
self.eltwise = eltwise_fn
def forward(self, x):
x = self.linear(x)
x = self.eltwise(x)
return x
for [has_bias, eltwise] in itertools.product(
[True, False],
['relu', 'gelu', 'sigmoid', 'hardtanh', 'relu6', 'elu']):
eltwise_fn = get_eltwise_fn(eltwise)
m = M(eltwise_fn, has_bias)
x = torch.rand(32, 28, requires_grad=False)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::' + eltwise])
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_conv2d_sum(self, dtype):
class M(nn.Module):
def __init__(self, bias=False):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
self.bn2 = nn.BatchNorm2d(32)
self.relu = nn.ReLU()
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
self.bn3 = nn.BatchNorm2d(32)
def forward(self, x, y):
x = self.conv1(x)
x = self.bn1(x)
y = self.conv2(y)
y = self.bn2(y)
z = self.relu(x + y)
z = self.conv3(z)
z = self.bn3(z)
return z
for bias in [True, False]:
m = M(bias).eval()
if dtype == torch.bfloat16:
m = optimization.fuse(m)
x = torch.rand(1, 32, 16, 16, requires_grad=False)
y = torch.rand(1, 32, 16, 16, requires_grad=False)
_, graph = self.checkTrace(m, [x, y], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_wildcard(self, dtype):
class M(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
y = self.eltwise(x)
return [x, y]
            # The pattern is as follows:
# conv
# | \
# eltwise \
# | \
# ListConstruct
#
# The output of conv is used by a wildcard op: ListConstruct.
# Thus conv-eltwise cannot be selected into the same Partition.
m = M()
x = torch.rand(1, 32, 28, 28)
_, graph = self.checkTrace(m, [x], dtype)
# conv can exist in a single-op oneDNN Graph partition but not relu
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::_convolution'])
@onlyCPU
@dtypes(torch.int32)
def test_wildcard_unsupported_dtype(self, dtype):
class M(nn.Module):
def forward(self, x):
y = x // 2
return y
            # In shufflenet_v2_x1_0, channels_per_group is computed as:
# channels_per_group = num_channels // groups
# JIT IR converts groups to Long dtype, which is unsupported
# by oneDNN Graph, viz. Long(requires_grad=0, device=cpu) = prim::Constant[value={2}]()
# This test just ensures that the bridge code can handle
# unsupported dtypes for inputs to ops unsupported
# by oneDNN Graph. In this particular UT, aten::floor_divide
# would be added as a wildcard in graph-construction stage.
m = M()
x = torch.tensor([32], dtype=dtype)
_, graph = self.checkTrace(m, [x], dtype)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@onlyCPU
@dtypes(torch.float32, torch.bfloat16)
def test_rewrap_tensor_input_to_pytorch(self, dtype):
class M(nn.Module):
def __init__(self, eltwise_fn):
super().__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = eltwise_fn
self.adaptive_avg_pool_2d = nn.AdaptiveAvgPool2d((5, 7))
def forward(self, x, y):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
x = self.eltwise(x)
x = torch.add(x, y)
x = self.adaptive_avg_pool_2d(x)
return x
eltwise_fn_name = 'relu'
eltwise_fn = get_eltwise_fn(eltwise_fn_name)
m = M(eltwise_fn)
m = m.to(memory_format=torch.channels_last)
x = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
y = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
# Simply test if the output is accurate
# The output of the second partition is input to adaptive_avg_pool2d, which is
# unsupported by LLGA, so it must be handled by PyTorch, which should receive
# correct strides info of the channels-last tensor.
graph, _ = self.checkTrace(m, [x, y], dtype)
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestEnableDisableLlgaFuser(JitTestCase):
def setUp(self):
super().setUp()
self.is_enabled = torch._C._jit_set_llga_enabled(False)
def tearDown(self):
torch._C._jit_set_llga_enabled(self.is_enabled)
super().tearDown()
def test_context_manager(self):
x = torch.randn(4, 8)
y = torch.randn(4, 8)
with torch.jit.fuser('fuser3'):
with torch.jit.fuser('fuser3'):
def t1(x, y):
o = x + y
o = o + 2.0
return o
t_jit = torch.jit.script(t1)
t_jit(x, y)
t_jit(x, y)
self.assertGraphContains(t_jit.graph_for(x, y), LLGA_FUSION_GROUP)
def t2(x, y):
o = x + y
o = o + 3.0
return o
t_jit_2 = torch.jit.script(t2)
t_jit_2(x, y)
t_jit_2(x, y)
self.assertGraphContains(t_jit_2.graph_for(x, y), LLGA_FUSION_GROUP)
def t3(x, y):
o = x + y
o = o + 4.0
return o
t_jit_3 = torch.jit.script(t3)
t_jit_3(x, y)
t_jit_3(x, y)
self.assertGraphContainsExactly(t_jit_3.graph_for(x, y), LLGA_FUSION_GROUP, 0)
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
@unittest.skip("Enable when integration with dynamo aot_autograd is more stable")
class TestDynamoAOT(JitTestCase):
def test_dynamo_aot_ts_onednn(self):
class Seq(nn.Module):
def __init__(self) -> None:
super().__init__()
self.layers = nn.Sequential(
nn.Linear(10, 10),
nn.ReLU(),
nn.Linear(10, 10),
nn.ReLU(),
)
def forward(self, x):
return self.layers(x)
mod = Seq()
import torch._dynamo
aot_mod = torch._dynamo.optimize("aot_ts", nopython=True)(mod)
for _ in range(10):
with torch.jit.fuser("fuser3"):
loss = aot_mod(torch.rand([10, 10])).sum()
loss.backward()
torch._dynamo.reset()
@unittest.skipIf(IS_AVX512_UNSUPPORTED, "This test fails for BF16 on machines without AVX512.")
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestModel(JitLlgaTestCase):
@skipIfNoTorchVision
def _test_vision(self, model_name, dtype):
m = getattr(torchvision.models, model_name)().eval()
if dtype == torch.bfloat16:
m = optimization.fuse(m)
x = torch.rand(1, 3, 224, 224) / 10
_, graph = self.checkTrace(m, [x], dtype)
self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
'aten::relu', 'aten::linear',
'aten::avg_pool2d', 'aten::max_pool2d'])
for model_name, enabled in [
['resnet50', True],
['resnext50_32x4d', True],
['resnext101_32x8d', True],
['densenet121', True],
['densenet161', True],
['densenet169', True],
['densenet201', True],
['efficientnet_b0', True],
['efficientnet_b1', True],
['efficientnet_b2', True],
['efficientnet_b3', True],
['efficientnet_b4', True],
['efficientnet_b5', True],
['efficientnet_b6', True],
['efficientnet_b7', True],
['regnet_y_400mf', True],
['googlenet', TEST_SCIPY],
['mobilenet_v2', True],
['mobilenet_v3_large', True],
['mnasnet1_0', True],
['squeezenet1_0', True],
['vgg16', True],
['alexnet', True],
['shufflenet_v2_x1_0', True],
['wide_resnet50_2', True],
]:
def _wrapper(mname, dtype):
@unittest.skipIf(not enabled, 'Disabled')
@separate_process
def test(self, dtype=dtype):
return self._test_vision(mname, dtype)
return test
for dtype in [torch.bfloat16, torch.float32]:
setattr(TestModel, 'test_vision_{}_{}'.format(model_name, str(dtype).split("torch.")[1]), _wrapper(model_name, dtype))
instantiate_device_type_tests(TestFusionPattern, globals())
instantiate_device_type_tests(TestOp, globals())
if __name__ == '__main__':
run_tests()
|
import sys
import torch
import unittest
import itertools
import torch.nn as nn
from functools import wraps
from concurrent import futures
import torch.nn.functional as F
import torch.fx.experimental.optimization as optimization
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY, IS_WINDOWS, IS_MACOS
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
dtypes
)
IS_AVX512_UNSUPPORTED = not is_avx512_supported()
LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup'
LLGA_NOT_ENABLED = not torch._C.has_mkldnn or IS_WINDOWS or IS_MACOS
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, 'no torchvision')
import torch._dynamo
|
import sys
import torch
import unittest
import itertools
import torch.nn as nn
from functools import wraps
from concurrent import futures
import torch.nn.functional as F
import torch.fx.experimental.optimization as optimization
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY, IS_WINDOWS, IS_MACOS
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
dtypes
)
IS_AVX512_UNSUPPORTED = not is_avx512_supported()
LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup'
LLGA_NOT_ENABLED = not torch.backends.mkldnn.is_available() or IS_WINDOWS or IS_MACOS
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, 'no torchvision')
import torch._dynamo
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
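The assertions throughout this record (assertGraphContainsExactly, assertFused) boil down to counting node kinds such as prim::oneDNNFusionGroup in the optimized JIT graph. A rough standalone sketch of that counting, using only public torch.jit APIs, is below; count_nodes and conv_relu are illustrative names, and on a default build you will likely see plain aten ops rather than a oneDNN fusion group.

import torch
import torch.nn.functional as F

def count_nodes(graph, kind):
    # Count nodes of a given kind, recursing into sub-blocks (e.g. of prim::If).
    total = 0
    for node in graph.nodes():
        if node.kind() == kind:
            total += 1
        for block in node.blocks():
            total += count_nodes(block, kind)
    return total

def conv_relu(x, w):
    return torch.relu(F.conv2d(x, w, padding=1))

x = torch.rand(1, 3, 16, 16)
w = torch.rand(8, 3, 3, 3)
traced = torch.jit.trace(conv_relu, (x, w))
traced(x, w)   # profiling runs so the executor can optimize the graph
traced(x, w)
graph = traced.graph_for(x, w)
# With the oneDNN Graph fuser enabled this counts 'prim::oneDNNFusionGroup'
# nodes; on a default build the graph usually contains plain aten ops only.
print(count_nodes(graph, "prim::oneDNNFusionGroup"))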
torch
|
test/test_jit_llga_fuser.py
|
test_dynamo_aot_ts_onednn
|
def test_dynamo_aot_ts_onednn(self):
class Seq(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(10, 10),
nn.ReLU(),
nn.Linear(10, 10),
nn.ReLU(),
)
def forward(self, x):
return self.layers(x)
mod = Seq()
import torch._dynamo
aot_mod = torch._dynamo.optimize("aot_ts", nopython=True)(mod)
for _ in range(10):
with torch.jit.fuser("fuser3"):
loss = aot_mod(torch.rand([10, 10])).sum()
loss.backward()
torch._dynamo.reset()
|
def test_dynamo_aot_ts_onednn(self):
class Seq(nn.Module):
def __init__(self) -> None:
super().__init__()
self.layers = nn.Sequential(
nn.Linear(10, 10),
nn.ReLU(),
nn.Linear(10, 10),
nn.ReLU(),
)
def forward(self, x):
return self.layers(x)
mod = Seq()
import torch._dynamo
aot_mod = torch._dynamo.optimize("aot_ts", nopython=True)(mod)
for _ in range(10):
with torch.jit.fuser("fuser3"):
loss = aot_mod(torch.rand([10, 10])).sum()
loss.backward()
torch._dynamo.reset()
|
import sys
import torch
import unittest
import itertools
import torch.nn as nn
from functools import wraps
from concurrent import futures
import torch.nn.functional as F
import torch.fx.experimental.optimization as optimization
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY, IS_WINDOWS, IS_MACOS
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
dtypes
)
IS_AVX512_UNSUPPORTED = not is_avx512_supported()
LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup'
LLGA_NOT_ENABLED = not torch._C.has_mkldnn or IS_WINDOWS or IS_MACOS
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, 'no torchvision')
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
@unittest.skip("Enable when integration with dynamo aot_autograd is more stable")
class TestDynamoAOT(JitTestCase):
import torch._dynamo
|
import sys
import torch
import unittest
import itertools
import torch.nn as nn
from functools import wraps
from concurrent import futures
import torch.nn.functional as F
import torch.fx.experimental.optimization as optimization
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY, IS_WINDOWS, IS_MACOS
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
dtypes
)
IS_AVX512_UNSUPPORTED = not is_avx512_supported()
LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup'
LLGA_NOT_ENABLED = not torch.backends.mkldnn.is_available() or IS_WINDOWS or IS_MACOS
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, 'no torchvision')
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
@unittest.skip("Enable when integration with dynamo aot_autograd is more stable")
class TestDynamoAOT(JitTestCase):
import torch._dynamo
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
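The skipped Dynamo test above wraps execution in torch.jit.fuser("fuser3") to route TorchScript graphs to the oneDNN Graph fuser. A small sketch of the same context-manager pattern with a scripted function is given here; 'fuser1' (NNC/TensorExpr) is used so the example does something on most CPU builds, and the add_chain name is illustrative.

import torch

def add_chain(x, y):
    o = x + y
    o = o + 2.0
    return o

x = torch.randn(4, 8)
y = torch.randn(4, 8)

# 'fuser1' selects the NNC/TensorExpr fuser; 'fuser3' (used above) selects the
# oneDNN Graph fuser and only has an effect on builds where LLGA is enabled.
with torch.jit.fuser("fuser1"):
    scripted = torch.jit.script(add_chain)
    scripted(x, y)   # profiling runs so the executor can specialize
    scripted(x, y)
    print(scripted.graph_for(x, y))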
torch
|
test/test_jit_llga_fuser.py
|
_wrapper
|
def _wrapper(mname, dtype):
@unittest.skipIf(not enabled, 'Disabled')
@separate_process
def test(self, dtype=dtype):
return self._test_vision(mname, dtype)
return test
for dtype in [torch.bfloat16, torch.float32]:
setattr(TestModel, 'test_vision_%s_%s' % (model_name, str(dtype).split("torch.")[1]), _wrapper(model_name, dtype))
|
def _wrapper(mname, dtype):
@unittest.skipIf(not enabled, 'Disabled')
@separate_process
def test(self, dtype=dtype):
return self._test_vision(mname, dtype)
return test
for dtype in [torch.bfloat16, torch.float32]:
setattr(TestModel, 'test_vision_{}_{}'.format(model_name, str(dtype).split("torch.")[1]), _wrapper(model_name, dtype))
|
import sys
import torch
import unittest
import itertools
import torch.nn as nn
from functools import wraps
from concurrent import futures
import torch.nn.functional as F
import torch.fx.experimental.optimization as optimization
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY, IS_WINDOWS, IS_MACOS
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
dtypes
)
IS_AVX512_UNSUPPORTED = not is_avx512_supported()
LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup'
LLGA_NOT_ENABLED = not torch._C.has_mkldnn or IS_WINDOWS or IS_MACOS
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, 'no torchvision')
import torch._dynamo
|
import sys
import torch
import unittest
import itertools
import torch.nn as nn
from functools import wraps
from concurrent import futures
import torch.nn.functional as F
import torch.fx.experimental.optimization as optimization
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY, IS_WINDOWS, IS_MACOS
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
dtypes
)
IS_AVX512_UNSUPPORTED = not is_avx512_supported()
LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup'
LLGA_NOT_ENABLED = not torch.backends.mkldnn.is_available() or IS_WINDOWS or IS_MACOS
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, 'no torchvision')
import torch._dynamo
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
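The _wrapper/setattr loop above attaches one generated test method per (model, dtype) pair to TestModel at import time. A minimal sketch of that dynamic-test-generation pattern, with a factory function to avoid Python's late-binding closure pitfall, is shown below; DynamicTests and _make_test are illustrative names.

import unittest

class DynamicTests(unittest.TestCase):
    pass

def _make_test(value):
    # Bind `value` now via the factory, avoiding the late-binding closure pitfall.
    def test(self):
        self.assertEqual(value * 2, value + value)
    return test

for value in (1, 3, 7):
    setattr(DynamicTests, f"test_double_{value}", _make_test(value))

if __name__ == "__main__":
    unittest.main()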
torch
|
test/test_jiterator.py
|
ref_fn
|
def ref_fn(x, y, alpha=1, beta=1):
return alpha * x + beta * y
class TestPythonJiterator(TestCase):
@parametrize("shape_strides", [
(([3, 3], [3, 1]), ([3, 3], [3, 1])), # contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_contiguous(self, device, dtypes, shape_strides):
a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])
a = a_buffer.as_strided(*shape_strides[0])
b = b_buffer.as_strided(*shape_strides[1])
expected = ref_fn(a, b)
result = jitted_fn(a, b)
self.assertEqual(expected, result)
@skipCUDAIfRocm
# See https://github.com/pytorch/pytorch/pull/76394#issuecomment-1118018287 for details
@skipCUDAIf(_get_torch_cuda_version() < (11, 6), "On cuda 11.3, nvrtcCompileProgram is taking too long to "
"compile jiterator generated kernels for non-contiguous input that requires dynamic-casting.")
@parametrize("shape_strides", [
(([3, 3], [1, 3]), ([3, 1], [1, 3])), # non-contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_noncontiguous(self, device, dtypes, shape_strides):
a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])
a = a_buffer.as_strided(*shape_strides[0])
b = b_buffer.as_strided(*shape_strides[1])
expected = ref_fn(a, b)
result = jitted_fn(a, b)
self.assertEqual(expected, result)
@dtypes(torch.float, torch.double, torch.float16, torch.bfloat16)
@parametrize("alpha", [-1, 2.0, None])
@parametrize("beta", [3, -4.2, None])
@toleranceOverride({torch.float16 : tol(atol=1e-2, rtol=1e-3)})
def test_extra_args(self, device, dtype, alpha, beta):
a = torch.rand(3, device=device).mul(10).type(dtype)
b = torch.rand(3, device=device).mul(10).type(dtype)
extra_args = {}
if alpha is not None:
extra_args["alpha"] = alpha
if beta is not None:
extra_args["beta"] = beta
expected = ref_fn(a, b, **extra_args)
result = jitted_fn(a, b, **extra_args)
self.assertEqual(expected, result)
@parametrize("is_train", [True, False])
def test_bool_extra_args(self, device, is_train):
code_string = "template <typename T> T conditional(T x, T mask, bool is_train) { return is_train ? x * mask : x; }"
jitted_fn = create_jit_fn(code_string, is_train=False)
def ref_fn(x, mask, is_train):
return x * mask if is_train else x
a = torch.rand(3, device=device)
b = torch.rand(3, device=device)
expected = ref_fn(a, b, is_train=is_train)
result = jitted_fn(a, b, is_train=is_train)
self.assertEqual(expected, result)
|
def ref_fn(x, y, alpha=1, beta=1):
return alpha * x + beta * y
class TestPythonJiterator(TestCase):
@parametrize("shape_strides", [
(([3, 3], [3, 1]), ([3, 3], [3, 1])), # contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_contiguous(self, device, dtypes, shape_strides):
a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])
a = a_buffer.as_strided(*shape_strides[0])
b = b_buffer.as_strided(*shape_strides[1])
expected = ref_fn(a, b)
result = jitted_fn(a, b)
self.assertEqual(expected, result)
# See https://github.com/pytorch/pytorch/pull/76394#issuecomment-1118018287 for details
# On cuda 11.3, nvrtcCompileProgram is taking too long to
# compile jiterator generated kernels for non-contiguous input that requires dynamic-casting.
@skipCUDAIfVersionLessThan((11, 6))
@parametrize("shape_strides", [
(([3, 3], [1, 3]), ([3, 1], [1, 3])), # non-contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_noncontiguous(self, device, dtypes, shape_strides):
a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])
a = a_buffer.as_strided(*shape_strides[0])
b = b_buffer.as_strided(*shape_strides[1])
expected = ref_fn(a, b)
result = jitted_fn(a, b)
self.assertEqual(expected, result)
@dtypes(torch.float, torch.double, torch.float16, torch.bfloat16)
@parametrize("alpha", [-1, 2.0, None])
@parametrize("beta", [3, -4.2, None])
@toleranceOverride({torch.float16 : tol(atol=1e-2, rtol=1e-3)})
def test_extra_args(self, device, dtype, alpha, beta):
a = torch.rand(3, device=device).mul(10).type(dtype)
b = torch.rand(3, device=device).mul(10).type(dtype)
extra_args = {}
if alpha is not None:
extra_args["alpha"] = alpha
if beta is not None:
extra_args["beta"] = beta
expected = ref_fn(a, b, **extra_args)
result = jitted_fn(a, b, **extra_args)
self.assertEqual(expected, result)
@parametrize("is_train", [True, False])
def test_bool_extra_args(self, device, is_train):
code_string = "template <typename T> T conditional(T x, T mask, bool is_train) { return is_train ? x * mask : x; }"
jitted_fn = create_jit_fn(code_string, is_train=False)
def ref_fn(x, mask, is_train):
return x * mask if is_train else x
a = torch.rand(3, device=device)
b = torch.rand(3, device=device)
expected = ref_fn(a, b, is_train=is_train)
result = jitted_fn(a, b, is_train=is_train)
self.assertEqual(expected, result)
|
import torch
from torch.cuda.jiterator import _create_jit_fn as create_jit_fn
from torch.cuda.jiterator import _create_multi_output_jit_fn as create_multi_output_jit_fn
import sys
from itertools import product
from torch.testing._internal.common_utils import TestCase, parametrize, run_tests, TEST_CUDA
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_device_type import (
skipCUDAIfRocm, skipCUDAIf, instantiate_device_type_tests, dtypes, toleranceOverride, tol)
from torch.testing._internal.common_cuda import _get_torch_cuda_version
code_string = "template <typename T> T my_fused_kernel(T x, T y, T alpha, T beta) { return alpha * x + beta * y; }"
jitted_fn = create_jit_fn(code_string, alpha=1, beta=1)
|
import torch
from torch.cuda.jiterator import _create_jit_fn as create_jit_fn
from torch.cuda.jiterator import _create_multi_output_jit_fn as create_multi_output_jit_fn
import sys
from itertools import product
from torch.testing._internal.common_utils import TestCase, parametrize, run_tests, TEST_CUDA, NoTest
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_device_type import (
skipCUDAIfVersionLessThan, instantiate_device_type_tests, dtypes, toleranceOverride, tol)
code_string = "template <typename T> T my_fused_kernel(T x, T y, T alpha, T beta) { return alpha * x + beta * y; }"
jitted_fn = create_jit_fn(code_string, alpha=1, beta=1)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_jiterator.py
|
ref_fn
|
def ref_fn(x, y, alpha=1, beta=1):
return alpha * x + beta * y
class TestPythonJiterator(TestCase):
@parametrize("shape_strides", [
(([3, 3], [3, 1]), ([3, 3], [3, 1])), # contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_contiguous(self, device, dtypes, shape_strides):
a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])
a = a_buffer.as_strided(*shape_strides[0])
b = b_buffer.as_strided(*shape_strides[1])
expected = ref_fn(a, b)
result = jitted_fn(a, b)
self.assertEqual(expected, result)
@skipCUDAIfRocm
# See https://github.com/pytorch/pytorch/pull/76394#issuecomment-1118018287 for details
@skipCUDAIf(_get_torch_cuda_version() < (11, 6), "On cuda 11.3, nvrtcCompileProgram is taking too long to "
"compile jiterator generated kernels for non-contiguous input that requires dynamic-casting.")
@parametrize("shape_strides", [
(([3, 3], [1, 3]), ([3, 1], [1, 3])), # non-contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_noncontiguous(self, device, dtypes, shape_strides):
a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])
a = a_buffer.as_strided(*shape_strides[0])
b = b_buffer.as_strided(*shape_strides[1])
expected = ref_fn(a, b)
result = jitted_fn(a, b)
self.assertEqual(expected, result)
@dtypes(torch.float, torch.double, torch.float16, torch.bfloat16)
@parametrize("alpha", [-1, 2.0, None])
@parametrize("beta", [3, -4.2, None])
@toleranceOverride({torch.float16 : tol(atol=1e-2, rtol=1e-3)})
def test_extra_args(self, device, dtype, alpha, beta):
a = torch.rand(3, device=device).mul(10).type(dtype)
b = torch.rand(3, device=device).mul(10).type(dtype)
extra_args = {}
if alpha is not None:
extra_args["alpha"] = alpha
if beta is not None:
extra_args["beta"] = beta
expected = ref_fn(a, b, **extra_args)
result = jitted_fn(a, b, **extra_args)
self.assertEqual(expected, result)
@parametrize("is_train", [True, False])
def test_bool_extra_args(self, device, is_train):
code_string = "template <typename T> T conditional(T x, T mask, bool is_train) { return is_train ? x * mask : x; }"
jitted_fn = create_jit_fn(code_string, is_train=False)
def ref_fn(x, mask, is_train):
return x * mask if is_train else x
a = torch.rand(3, device=device)
b = torch.rand(3, device=device)
expected = ref_fn(a, b, is_train=is_train)
result = jitted_fn(a, b, is_train=is_train)
self.assertEqual(expected, result)
|
def ref_fn(x, y, alpha=1, beta=1):
return alpha * x + beta * y
class TestPythonJiterator(TestCase):
@parametrize("shape_strides", [
(([3, 3], [3, 1]), ([3, 3], [3, 1])), # contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_contiguous(self, device, dtypes, shape_strides):
a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])
a = a_buffer.as_strided(*shape_strides[0])
b = b_buffer.as_strided(*shape_strides[1])
expected = ref_fn(a, b)
result = jitted_fn(a, b)
self.assertEqual(expected, result)
# See https://github.com/pytorch/pytorch/pull/76394#issuecomment-1118018287 for details
# On cuda 11.3, nvrtcCompileProgram is taking too long to
# compile jiterator generated kernels for non-contiguous input that requires dynamic-casting.
@skipCUDAIfVersionLessThan((11, 6))
@parametrize("shape_strides", [
(([3, 3], [1, 3]), ([3, 1], [1, 3])), # non-contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_noncontiguous(self, device, dtypes, shape_strides):
a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])
a = a_buffer.as_strided(*shape_strides[0])
b = b_buffer.as_strided(*shape_strides[1])
expected = ref_fn(a, b)
result = jitted_fn(a, b)
self.assertEqual(expected, result)
@dtypes(torch.float, torch.double, torch.float16, torch.bfloat16)
@parametrize("alpha", [-1, 2.0, None])
@parametrize("beta", [3, -4.2, None])
@toleranceOverride({torch.float16 : tol(atol=1e-2, rtol=1e-3)})
def test_extra_args(self, device, dtype, alpha, beta):
a = torch.rand(3, device=device).mul(10).type(dtype)
b = torch.rand(3, device=device).mul(10).type(dtype)
extra_args = {}
if alpha is not None:
extra_args["alpha"] = alpha
if beta is not None:
extra_args["beta"] = beta
expected = ref_fn(a, b, **extra_args)
result = jitted_fn(a, b, **extra_args)
self.assertEqual(expected, result)
@parametrize("is_train", [True, False])
def test_bool_extra_args(self, device, is_train):
code_string = "template <typename T> T conditional(T x, T mask, bool is_train) { return is_train ? x * mask : x; }"
jitted_fn = create_jit_fn(code_string, is_train=False)
def ref_fn(x, mask, is_train):
return x * mask if is_train else x
a = torch.rand(3, device=device)
b = torch.rand(3, device=device)
expected = ref_fn(a, b, is_train=is_train)
result = jitted_fn(a, b, is_train=is_train)
self.assertEqual(expected, result)
|
import torch
from torch.cuda.jiterator import _create_jit_fn as create_jit_fn
from torch.cuda.jiterator import _create_multi_output_jit_fn as create_multi_output_jit_fn
import sys
from itertools import product
from torch.testing._internal.common_utils import TestCase, parametrize, run_tests, TEST_CUDA
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_device_type import (
skipCUDAIfRocm, skipCUDAIf, instantiate_device_type_tests, dtypes, toleranceOverride, tol)
from torch.testing._internal.common_cuda import _get_torch_cuda_version
code_string = "template <typename T> T my_fused_kernel(T x, T y, T alpha, T beta) { return alpha * x + beta * y; }"
jitted_fn = create_jit_fn(code_string, alpha=1, beta=1)
|
import torch
from torch.cuda.jiterator import _create_jit_fn as create_jit_fn
from torch.cuda.jiterator import _create_multi_output_jit_fn as create_multi_output_jit_fn
import sys
from itertools import product
from torch.testing._internal.common_utils import TestCase, parametrize, run_tests, TEST_CUDA, NoTest
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_device_type import (
skipCUDAIfVersionLessThan, instantiate_device_type_tests, dtypes, toleranceOverride, tol)
code_string = "template <typename T> T my_fused_kernel(T x, T y, T alpha, T beta) { return alpha * x + beta * y; }"
jitted_fn = create_jit_fn(code_string, alpha=1, beta=1)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_legacy_vmap.py
|
test_in_dims_wrong_type_err_msg
|
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
# The following should not throw
vmap(torch.mul, (0, 0))(x, y)
|
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r"expected `in_dims` to be int or a \(potentially nested\) tuple"
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, "lol")(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
# The following should not throw
vmap(torch.mul, (0, 0))(x, y)
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests
import types
FALLBACK_REGEX = r'There is a performance drop'
class TestVmapAPI(TestCase):
|
import functools
import itertools
import types
import warnings
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
FALLBACK_REGEX = r"There is a performance drop"
class TestVmapAPILegacy(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_legacy_vmap.py
|
test_fallback_masked_fill
|
def test_fallback_masked_fill(self):
# NB: One day we will implement a batching rule for masked_fill
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
|
def test_fallback_masked_fill(self):
# NB: One day we will implement a batching rule for masked_fill
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 11, 13)
self._assert_uses_vmap_fallback(
(torch.index_add, (0, None, None, 0)), (x, dim, index, values)
)
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(x, dim + 1, index, values.view(B0, 3, 11, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests
import types
FALLBACK_REGEX = r'There is a performance drop'
class TestVmapAPI(TestCase):
|
import functools
import itertools
import types
import warnings
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
FALLBACK_REGEX = r"There is a performance drop"
class TestVmapAPILegacy(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_legacy_vmap.py
|
run_test
|
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
|
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 11, 13)
self._assert_uses_vmap_fallback(
(torch.index_add, (0, None, None, 0)), (x, dim, index, values)
)
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(x, dim + 1, index, values.view(B0, 3, 11, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests
import types
FALLBACK_REGEX = r'There is a performance drop'
|
import functools
import itertools
import types
import warnings
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
FALLBACK_REGEX = r"There is a performance drop"
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_legacy_vmap.py
|
allowVmapFallbackUsage
|
def allowVmapFallbackUsage(fn):
fn._allow_vmap_fallback_usage = True
return fn
# All tests of TestVmapBase check that the slow vmap fallback is never invoked.
# This is so that we can incrementally add batching rules for operators to
# replace the slow vmap fallback path for said operators. To skip this check,
# please use the allowVmapFallbackUsage decorator.
#
# NB: Don't add tests to TestVmapBase directly, unless you want them to run
# on every subclass of TestVmapBase. Add them to e.g. TestVmapOperators.
#
# NB: TestVmapBase is a nested class. This prevents test runners from picking
# it up and running it.
class Namespace:
class TestVmapBase(TestCase):
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is None:
return
if not should_allow_vmap_fallback_usage(test_method):
setattr(self, method_name,
self._wrap_method_with_vmap_fallback_check(test_method))
def _wrap_method_with_vmap_fallback_check(self, method):
msg = (
'Expected the test to not invoke the vmap fallback path, i.e., '
'all of the operators being tested in this test should have batching '
'rules implemented. If you are intentionally testing something to '
'do with the fallback path, use allowVmapFallbackUsage. Otherwise, '
'please make sure that batching rules are implemented for the '
'operator(s) being tested.'
)
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
with warnings.catch_warnings(record=True) as wa:
warnings.simplefilter('always')
with EnableVmapFallbackWarnings():
method(*args, **kwargs)
for captured_warning in wa:
self.assertNotRegex(str(captured_warning.message), FALLBACK_REGEX, msg)
return types.MethodType(wrapper, self)
@allowVmapFallbackUsage
def test_vmap_fallback_check_ok(self):
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
vmap(op_using_fallback)(torch.rand(3))
def test_vmap_fallback_check(self):
@self._wrap_method_with_vmap_fallback_check
def no_fallback(self):
pass
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
@self._wrap_method_with_vmap_fallback_check
def uses_fallback(self):
vmap(op_using_fallback)(torch.rand(3))
no_fallback(self)
with self.assertRaises(AssertionError):
uses_fallback(self)
class TestVmapOperators(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
def _vmap_view_test(self, *args, **kwargs):
self._vmap_test(*args, **kwargs, check_view=True)
def _test_unary(self, op, getter, device, *args, **kwargs):
test = functools.partial(self._vmap_test, *args, **kwargs)
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [getter([B0, 3], device)])
test(op, [getter([2, 5, B0, 3], device)], in_dims=2)
test(op, [getter([2, 5, B0, 3], device)], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [getter([B0, B1], device)])
test(vmap(op), [getter([B1, 2, 5, B0, 3], device)], in_dims=2)
test(vmap(op, in_dims=2), [getter([2, 5, B0, B1, 3], device)],
in_dims=2, out_dims=2)
def test_unary_pointwise_ops(self):
cases = [
(torch.abs, TensorFactory.randn),
(torch.acos, TensorFactory.rand),
(torch.asin, TensorFactory.rand),
(torch.atan, TensorFactory.rand),
(torch.ceil, TensorFactory.randn),
(torch.cos, TensorFactory.rand),
(torch.cosh, TensorFactory.rand),
(torch.digamma, TensorFactory.rand),
(torch.exp, TensorFactory.randn),
(torch.expm1, TensorFactory.randn),
(torch.floor, TensorFactory.randn),
(torch.frac, TensorFactory.randn),
(torch.lgamma, TensorFactory.rand),
(torch.log, TensorFactory.randp1),
(torch.log10, TensorFactory.randp1),
(torch.log1p, TensorFactory.randp1),
(torch.log2, TensorFactory.randp1),
(torch.neg, TensorFactory.randn),
(torch.reciprocal, TensorFactory.randp1),
(torch.relu, TensorFactory.randn),
(torch.round, TensorFactory.randn),
(torch.rsqrt, TensorFactory.randp1),
(torch.sigmoid, TensorFactory.randn),
(torch.sign, TensorFactory.randn),
(torch.sin, TensorFactory.rand),
(torch.sinh, TensorFactory.rand),
(torch.sqrt, TensorFactory.rand),
(torch.tan, TensorFactory.rand),
(torch.tanh, TensorFactory.rand),
(torch.trunc, TensorFactory.randn),
]
for op, getter in cases:
self._test_unary(op, getter, 'cpu')
def test_clone(self):
# Some basic tests
self._test_unary(lambda x: x.clone(), TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.preserve_format),
TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.contiguous_format),
TensorFactory.randn, 'cpu')
# Test that the per-examples are contiguous when using torch.contiguous_format
def clone_contiguous(x):
return x.clone(memory_format=torch.contiguous_format)
B0, B1 = 3, 5
x = torch.randn(2, B0, 7)
y = vmap(clone_contiguous, in_dims=1, out_dims=1)(x)
self.assertTrue(y.movedim(1, 0).is_contiguous())
self.assertTrue(y[:, 0, :].is_contiguous())
x = torch.randn(2, B0, 7, B1)
y = vmap(vmap(clone_contiguous, in_dims=2), in_dims=1)(x)
self.assertTrue(y.is_contiguous())
self.assertTrue(y[0][0].is_contiguous())
msg = r'only supported with memory_format torch.preserve_format or torch.contiguous_format'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last))(torch.randn(B0))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last_3d))(torch.randn(B0))
def test_binary_pointwise_ops(self):
def get_number(getter):
return getter([]).item()
def make_case(op, input_getter=TensorFactory.randn):
return (op, input_getter)
cases = [
# Basic arithmetic
make_case(torch.add),
make_case(lambda x, y: x + y),
make_case(torch.sub),
make_case(lambda x, y: x - y),
make_case(torch.mul),
make_case(lambda x, y: x * y),
make_case(torch.div, input_getter=TensorFactory.randp1),
make_case(lambda x, y: x / y, input_getter=TensorFactory.randp1),
make_case(torch.pow, input_getter=TensorFactory.randp1),
make_case(lambda x, y: x ** y, input_getter=TensorFactory.randp1),
]
test = self._vmap_test
for op, getter in cases:
device = 'cpu'
B0, B1 = 7, 11
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(op, (getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(0, None))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
# Python number overload: op(Tensor, Number) (and vice-versa)
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
number = get_number(getter)
self._test_unary(lambda t: op(number, t), getter, device)
# Type promotion: op(Logical Scalar Tensor, Logical Scalar Tensor)
test(op, (getter([B0], device), getter([B0], device, dtype=torch.double)))
test(op, (getter([B0], device, dtype=torch.double), getter([B0], device)))
test(op, (getter([B0], device), getter([B0], device)))
# Type promotion: op(Tensor, Logical Scalar Tensor) (and vice-versa)
test(op, (getter([B0, 2], device), getter([B0], device, torch.double)))
test(op, (getter([B0], device, torch.double), getter([B0, 2], device)))
if not torch.cuda.is_available():
continue
# TODO(rzou): fix the following
# # Test cross-device scalars
# number = get_number(getter)
# self._test_unary(lambda t: op(t, number), getter, device='cuda')
# self._test_unary(lambda t: op(number, t), getter, device='cuda')
# self._test_unary(lambda t: op(t, torch.tensor(number)), getter, device='cuda')
def test_as_strided(self):
def _test(sizes, strides, offset, tensor, lambd):
result = vmap(lambda t: t.as_strided(sizes, strides, offset))(tensor)
expected = vmap(lambd)(tensor)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
# single vmap test
B0 = 5
tensors = [
# contiguous
torch.randn(B0, 2, 3),
# non-contiguous
torch.randn(B0, 3, 2).transpose(1, 2),
# non-zero storage offset
torch.randn(2, B0, 2, 3)[1],
# non-contiguous strides, zero storage offset
torch.randn(B0, 2, 4, 3, 7)[:, :, 0, :, 0],
# non-contiguous strides, non-zero storage offset
torch.randn(B0, 2, 4, 3, 7)[:, :, 2, :, 1],
]
for x in tensors:
S0, S1 = x.stride()[1:]
offset = x.storage_offset()
# Broadcast
_test([5, 5, 2, 3], [0, 0, S0, S1], offset, x, lambda x: x.expand(5, 5, 2, 3))
# transpose
_test([3, 2], [S1, S0], offset, x, lambda x: x.transpose(0, 1))
# select
_test([2], [S0], offset + S1, x, lambda x: x[:, 1])
# Nested vmap test
B1 = 7
x = torch.randn(B1, B0, 2, 3)
S0, S1 = x.stride()[2:]
result = vmap(vmap(lambda t: t.as_strided([5, 5, 2, 3], [0, 0, S0, S1])), in_dims=1)(x)
expected = vmap(vmap(lambda t: t.expand(5, 5, 2, 3)), in_dims=1)(x)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
# Check that mal-formatted size/strides doesn't crash
with self.assertRaisesRegex(RuntimeError, 'size and stride must have the same length'):
x = torch.randn(B0, 2, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([1, 1, 1], [1, 1]))(x)
# Sanity check #1: we require the batch dims to be at the front of the
# tensor (in memory layout).
msg = 'batch dims being vmapped over are at the front of the tensor'
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(2, B0, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([2, 3], [B0 * 3, 1]))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 2, 3, B1).movedim(3, 1)
vmap(vmap(lambda x: x.as_strided([2, 3], [B1 * 3, B1])))(x)
# All the Sanity check #2{a,b,c} cases check that
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# doesn't index memory that is out of bounds of xs[i]. This condition
# is important to the correctness of the as_strided batching rule
# (see NOTE: [When will the as_strided_batching_rule fail?])
# Sanity check #2a: The maximum indexable location of
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is less than or equal to the maximum indexable location of xs[i].
msg = 'This is not supported inside of vmap'
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 3)
vmap(lambda x: x.as_strided([3], [1], 1))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 3, 5)
vmap(lambda x: x.as_strided([4, 4], [4, 1], 0))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, B1, 3, 5)
vmap(vmap(lambda x: x.as_strided([4, 4], [4, 1], 0)))(x)
# Sanity check #2b: The min indexable location of
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is greater than or equal to the min indexable location of xs[i].
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(2, B0, 3)[1]
vmap(lambda x: x.as_strided([3], [1], B0 * 3 - 1))(x)
# Sanity check #2c:
# xs[i] is a zero-dim tensor, but
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is not
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 0, 3)
vmap(lambda x: x.as_strided([3], [1]))(x)
def test_bmm(self):
op = torch.bmm
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 3, 3, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(2, 5, 3)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 3, 5), torch.rand(2, 5, 3)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(2, 5, 3), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5, 3), torch.rand(B1, B0, 2, 3, 5)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(B0, 2, 5, 3)))
test(vmap(op), (torch.rand(B1, B0, 2, 3, 5), torch.rand(B0, B1, 2, 5, 3)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 3, 5), torch.rand(B0, 2, 5, 3)), in_dims=(None, 0))
def test_cat(self):
test = self._vmap_test
B0, B1 = 5, 7
# Quick hack b/c vmap can't accept a list of tensors as an argument
def get_op(dim):
def op(*tensors):
return torch.cat(tensors, dim=dim)
return op
test(get_op(0), (torch.rand(B0, 2), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(2), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(3, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(17, 2), torch.rand(17, 3, B0)), in_dims=(None, 2))
test(vmap(get_op(0), in_dims=(0, None)),
(torch.rand(B1, 2), torch.rand(B0, 3)), in_dims=(None, 0))
test(vmap(get_op(0), in_dims=(0, 0)),
(torch.rand(B1, 2), torch.rand(B0, B1, 3)), in_dims=(None, 0))
def test_conj(self):
op = torch.conj
def run_test(dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
B0, B1 = 7, 11
test = self._vmap_test
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3])])
test(op, [get([2, 5, B0, 3])], in_dims=2)
test(op, [get([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
# correctness tests
run_test(torch.float)
run_test(torch.cfloat)
# check that torch.conj on a non-complex tensor returns the same tensor
real_tensor = torch.randn(3)
result = vmap(op)(real_tensor)
self.assertEqual(result.data_ptr(), real_tensor.data_ptr())
def test_contiguous(self):
op = Tensor.contiguous
self._test_unary(op, TensorFactory.randn, 'cpu')
# check that contiguous returns the original tensor if the per-examples
# are already contiguous
B0 = 3
x = torch.randn(B0, 2, 5, 7)
x = x.movedim(0, 2)
result = vmap(Tensor.contiguous, in_dims=2, out_dims=2)(x)
self.assertTrue(result is x)
msg = 'NYI: querying is_contiguous inside of vmap for memory_format'
tensor = torch.randn(B0, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(op, memory_format=torch.channels_last))(tensor)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(op, memory_format=torch.channels_last_3d))(tensor)
def test_stride(self):
B0 = 3
x = torch.randn(B0, 2, 5, 7)
def foo(x):
assert x.stride() == (7 * 5, 7, 1)
return x
vmap(foo)(x)
x = torch.randn(2, B0, 5, 7).movedim(1, 0)
def bar(x):
assert x.stride() == (7 * 5 * B0, 7, 1)
return x
vmap(bar)(x)
def test_chunk(self):
test = self._vmap_view_test
op = torch.chunk
B0, B1, B2 = 7, 11, 13
# tests for torch.split(self, split_size: int, dim)
test(op, (torch.rand(B0, 2, 1024), 15, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 9, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 4, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_clamp(self):
clamp_cases = (
(lambda t: t.clamp(min=-0.5), TensorFactory.randn),
(lambda t: t.clamp(max=0.5), TensorFactory.randn),
(lambda t: t.clamp(min=-0.5, max=0.5), TensorFactory.randn),
(lambda t: t.clamp_min(min=-0.5), TensorFactory.randn),
(lambda t: t.clamp_max(max=0.5), TensorFactory.randn),
)
for op, getter in clamp_cases:
self._test_unary(op, getter, 'cpu')
def test_comparison_ops(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
getter = TensorFactory.randn
B0, B1 = 7, 11
ops = (
torch.eq, lambda x, y: x == y,
torch.gt, lambda x, y: x > y,
torch.ge, lambda x, y: x >= y,
torch.le, lambda x, y: x <= y,
torch.lt, lambda x, y: x < y,
torch.ne, lambda x, y: x != y,
)
for op in ops:
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3]), getter([B0, 3])))
test(op, (getter([B0]), getter([B0, 2, 3])))
test(op, (getter([B0]), getter([2, B0, 3])), in_dims=(0, 1))
test(op, (getter([B0]), getter([2, B0, 3])), in_dims=(0, 1), out_dims=1)
test(op, (getter([B0]), getter([2, 3])), in_dims=(0, None))
test(op, (getter([2, 3]), getter([B0, 3])), in_dims=(0, None))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3]), getter([B0, B1, 3])))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3]), getter([B1, 3])), in_dims=(0, None))
# test number as inputs
number = getter([]).item()
self._test_unary(lambda t: op(t, number), getter, 'cpu', check_propagates_grad=False)
def test_diagonal(self):
tensor = torch.randn(3, 5, 7, 11, 13)
test = self._vmap_view_test
op = torch.diagonal
test(op, (tensor, 1, 0, 1), in_dims=(0, None, None, None))
test(op, (tensor, 0, 2, -1), in_dims=(0, None, None, None))
test(op, (tensor, 2, 1, 2), in_dims=(1, None, None, None))
test(op, (tensor, 0, -2, -1), in_dims=(1, None, None, None), out_dims=1)
test(vmap(lambda t: op(t, 0, 0, -1)), (tensor,), in_dims=1, out_dims=1)
test(vmap(vmap(lambda t: op(t, 0, 0, 1), in_dims=1), in_dims=3),
(tensor,), in_dims=1, out_dims=1)
def test_dot(self):
op = torch.dot
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 5), torch.rand(5)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 5), torch.rand(5)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(5), torch.rand(B0, 5)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(5), torch.rand(B1, B0, 5)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 5), torch.rand(B0, 5)))
test(vmap(op), (torch.rand(B1, B0, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 5), torch.rand(B0, 5)), in_dims=(None, 0))
def test_expand_as(self):
op = torch.Tensor.expand_as
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 1, 5), torch.rand(B0, 2, 3, 5)))
test(op, (torch.rand(B0, 1, 5), torch.rand(2, 3, 5)), in_dims=(0, None))
test(op, (torch.rand(1, 5), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B0, B1, 2, 3, 5)))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B1, B0, 2, 3, 5)), in_dims=(0, 1))
test(vmap(op), (torch.rand(B0, B1), torch.rand(B1, 2, 3, 5)), in_dims=(0, None))
test(vmap(vmap(op)), (torch.rand(B0, B1, B2), torch.rand(B0, B1, B2, 2, 3, 5)))
def test_fill_and_zero_inplace(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B0, B1 = 7, 11
ops = (
lambda t: t.fill_(0.1),
lambda t: t.fill_(torch.tensor(0.2)),
lambda t: t.zero_(),
)
for op in ops:
# Single vmap, various in_dims / out_dims
test(op, [TensorFactory.randn([B0, 3])])
test(op, [TensorFactory.randn([2, 5, B0, 3])], in_dims=2)
test(op, [TensorFactory.randn([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [TensorFactory.randn([B0, B1])])
test(vmap(op), [TensorFactory.randn([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(op, in_dims=2), [TensorFactory.randn([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
# test when value is a batched tensor for fill_ operator
B0, B1 = 3, 5
test(Tensor.fill_, [TensorFactory.randn([B0, B1]), TensorFactory.randn(B0)])
with self.assertRaisesRegex(RuntimeError,
r"output with shape .+ doesn't match the broadcast shape"):
# Runtime Error is thrown when the tensor being written to isn't being vmapped over
vmap(Tensor.fill_, (None, 0))(TensorFactory.randn([B0, B1]),
TensorFactory.randn([B0]))
def _test_complex_views(self, op, dtypes):
test = self._vmap_view_test
def run_test(op, dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3])])
test(op, [get([3, B0])], in_dims=1)
test(op, [get([2, 5, B0, 3])], in_dims=2)
test(op, [get([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, 3, B0])], in_dims=4)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
for dtype in dtypes:
run_test(op, dtype)
def test_real(self):
self._test_complex_views(torch.real, dtypes=[torch.cfloat, torch.cdouble])
def test_imag(self):
self._test_complex_views(torch.imag, dtypes=[torch.cfloat, torch.cdouble])
def test_view_as_real(self):
self._test_complex_views(torch.view_as_real, dtypes=[torch.cfloat, torch.cdouble])
def test_view_as_complex(self):
def run_test(dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
op = torch.view_as_complex
test = self._vmap_view_test
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3, 2])])
test(op, [get([2, 5, B0, 3, 2])], in_dims=2)
test(op, [get([2, 5, B0, 3, 2])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1, 2])])
test(vmap(op), [get([B1, 2, 5, B0, 3, 2])], in_dims=2)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3, 2])],
in_dims=2, out_dims=2)
# Interesting case #1: Batch dim directly before dim of size 2
test(op, [get([3, B0, 2])], in_dims=1)
test(vmap(op, in_dims=1), [get([3, B1, B0, 2])], in_dims=2)
# Interesting case #2: Batch dim at end of tensor, success cases
# view_as_complex requires that the dim with size 2 have stride 1
# in order for the view to function properly
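# Illustrative sketch (not part of the original test): the size-2 dim must be
# the innermost, stride-1 dim, e.g.
#   torch.view_as_complex(torch.randn(3, 2))      # ok: last dim has stride 1
#   torch.view_as_complex(torch.randn(2, 3).t())  # RuntimeError: last dim has stride 3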
test(op, [get([B0, 2]).transpose(0, 1)], in_dims=1)
test(vmap(op, in_dims=1), [get([B0, B1, 2]).movedim(1, 2)])
test(vmap(op, in_dims=2), [get([B0, 3, B1, 2]).movedim(2, 3)])
# Interesting case #3: Batch dim at end of tensor, failure cases
msg = "Tensor must have a last dimension with stride 1"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=1)(get([2, B0]))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=1), in_dims=1)(get([2, B0, B1]))
# Invalid input: no dimension of size 2
msg = 'Input tensor must have one or more dimensions'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(get([B0]))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op))(get([B0, B1]))
# Invalid input: Batch dim has size 2, but the logical last dim does
# not have size 2
msg = 'Tensor must have a last dimension of size 2'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=1)(get([3, 2]))
for dtype in [torch.float, torch.double]:
run_test(dtype)
def test_is_complex(self):
ctensor = torch.randn(3, dtype=torch.cfloat)
tensor = torch.randn(3)
def foo(x):
if x.is_complex():
return torch.tensor(1)
else:
return torch.tensor(0)
self.assertEqual(vmap(foo)(ctensor), torch.tensor([1, 1, 1]))
self.assertEqual(vmap(foo)(tensor), torch.tensor([0, 0, 0]))
def test_is_floating_point(self):
float_tensor = torch.tensor([1., 2., 3.])
long_tensor = torch.tensor([1, 2, 3])
def foo(x):
if x.is_floating_point():
return torch.tensor(1)
else:
return torch.tensor(0)
self.assertEqual(vmap(foo)(float_tensor), torch.tensor([1, 1, 1]))
self.assertEqual(vmap(foo)(long_tensor), torch.tensor([0, 0, 0]))
def test_is_contiguous(self):
def foo(x):
if x.is_contiguous():
return torch.tensor(1.)
else:
return torch.tensor(0.)
B0, B1 = 3, 5
# Single batch dim
contig = torch.randn(B0, 2, 7)
self.assertEqual(vmap(foo)(contig), torch.ones(B0))
noncontig = torch.randn(2, B0, 7)
self.assertEqual(vmap(foo, in_dims=1)(noncontig), torch.zeros(B0))
noncontig = torch.randn(2, B0, 7).movedim(1, 0)
self.assertEqual(vmap(foo)(noncontig), torch.zeros(B0))
noncontig = torch.randn(2, 7, B0)
self.assertEqual(vmap(foo, in_dims=2)(noncontig), torch.zeros(B0))
# Multiple batch dims
contig = torch.randn(B0, B1, 3)
self.assertEqual(vmap(vmap(foo))(contig), torch.ones(B0, B1))
contig = torch.randn(B1, B0, 3)
self.assertEqual(vmap(vmap(foo), in_dims=1)(contig), torch.ones(B0, B1))
contig = torch.randn(B1, B0, 3).movedim(0, 1)
self.assertEqual(vmap(vmap(foo))(contig), torch.ones(B0, B1))
noncontig = torch.randn(B0, 3, B1)
self.assertEqual(vmap(vmap(foo, in_dims=1))(noncontig), torch.zeros(B0, B1))
# is_contiguous on empty tensor is True
def bar(x):
assert x.is_contiguous()
return x
vmap(bar)(torch.randn(B0, 0, 3))
vmap(bar, in_dims=1)(torch.randn(0, B0, 3))
vmap(bar)(torch.randn(B0, 0, 3).mT)
# is_contiguous with other memory formats
def baz(x, memory_format):
x.is_contiguous(memory_format=memory_format)
return x
msg = 'NYI: querying is_contiguous inside of vmap for memory_format'
tensor = torch.randn(B0, 2, 7, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(baz, memory_format=torch.channels_last))(tensor)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(baz, memory_format=torch.channels_last_3d))(tensor)
def test_movedim(self):
op = torch.movedim
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
# movedim(tensor, int, int) variant
test(op, (torch.rand(B0, 2, 5), 0, 1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 0, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 2, B0, 5), 0, 1), in_dims=(2, None, None))
test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), 0, 1), in_dims=(2, None, None))
# movedim(tensor, intlist, intlist) variant
test(op, (torch.rand(B0, 2, 3, 5), [1, 0], [0, 2]), in_dims=(0, None, None))
test(op, (torch.rand(2, 3, B0, 5), [1, 0], [0, 2]), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5), [0, 1], [1, 0]), in_dims=(2, None, None))
test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), [0, 1], [1, 0]), in_dims=(2, None, None))
def test_mm(self):
op = torch.mm
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5, 2)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 5), torch.rand(5, 2)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5, 2)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5), torch.rand(B1, B0, 5, 2)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5, 2)))
test(vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5, 2)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 5), torch.rand(B0, 5, 2)), in_dims=(None, 0))
def test_mv(self):
op = torch.mv
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 5), torch.rand(5)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5), torch.rand(B1, B0, 5)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5)))
test(vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 5), torch.rand(B0, 5)), in_dims=(None, 0))
def test_narrow(self):
op = torch.narrow
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), -1, 1, 3), in_dims=(0, None, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1, 3), in_dims=(1, None, None, None))
test(vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 2, B0, 5), 1, 0, 0), in_dims=(2, None, None, None))
test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
(torch.rand(B1, 2, B0, 5, B2), -1, 2, 3), in_dims=(2, None, None, None))
def test_new_empty(self):
# Empty is non-deterministic so we just check that the shape of the
# output tensor is what we expect and that the vmap fallback isn't used.
op = Tensor.new_empty
B0, B1 = 7, 11
result = vmap(lambda x: op(x, [2, 3]))(torch.randn(B0))
self.assertEqual(result.shape, [B0, 2, 3])
result = vmap(lambda x: op(x, []))(torch.randn(B0))
self.assertEqual(result.shape, [B0])
result = vmap(vmap(lambda x: op(x, [2, 3])))(torch.randn(B0, B1))
self.assertEqual(result.shape, [B0, B1, 2, 3])
def test_new_empty_strided(self):
# Empty is non-deterministic so we just check that the shape and strides
# of the output are what we expect and that the vmap fallback isn't used
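# Illustrative numbers (not part of the original test): for size [2, 3, 5]
# with stride [15, 5, 1], a single per-example tensor needs a storage of
# S = 1 + 1*15 + 2*5 + 4*1 = 30 elements, so the batched result is expected
# to have shape [B0, 2, 3, 5] and stride [30, 15, 5, 1].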
B0, B1 = 7, 11
def _test_single_vmap(size, stride, B0):
x = torch.randn(B0)
result = vmap(lambda x: x.new_empty_strided(size, stride))(x)
S = torch.empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0] + size)
self.assertEqual(result.stride(), [S] + stride)
def _test_double_vmap(size, stride, B0, B1):
x = torch.randn(B0, B1)
result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)))(x)
S = torch.empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0, B1] + size)
self.assertEqual(result.stride(), [B1 * S, S] + stride)
x = torch.randn(B1, B0)
result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)), in_dims=1)(x)
S = x.new_empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0, B1] + size)
self.assertEqual(result.stride(), [B1 * S, S] + stride)
# contiguous case
_test_single_vmap([2, 3, 5], [3 * 5, 5, 1], B0)
_test_double_vmap([2, 3, 5], [3 * 5, 5, 1], B0, B1)
# expanded
_test_single_vmap([2, 3, 5], [0, 5, 1], B0)
_test_double_vmap([2, 3, 5], [0, 5, 1], B0, B1)
# some of these cases are pretty strange, just verifying that if
# empty_strided allows them then BatchedTensor.new_empty_strided
# can as well
for shape in [[2, 3, 4], [0, 2, 0]]:
for strides in [[12, 4, 1], [2, 4, 6], [0, 0, 0]]:
_test_single_vmap(shape, strides, B0)
_test_double_vmap(shape, strides, B0, B1)
def test_new_zeros(self):
op = Tensor.new_zeros
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B0, B1 = 7, 11
test(lambda x: op(x, 2, 3), (torch.rand(B0),))
test(lambda x: op(x, []), (torch.rand(B0),))
test(vmap(lambda x: op(x, 3, 5)), (torch.rand(B0, B1),))
def test_select(self):
op = torch.select
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), 0, 0), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1), in_dims=(1, None, None))
test(vmap(lambda t: op(t, 1, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(lambda t: op(t, 1, 1), in_dims=1)), (torch.rand(B1, 2, B0, B2, 5),), in_dims=2)
def test_stack(self):
test = self._vmap_test
B0, B1 = 5, 7
# Quick hack b/c vmap can't accept a list of tensors as an argument
def get_op(dim):
def op(*tensors):
return torch.stack(tensors, dim=dim)
return op
test(get_op(0), (torch.rand(B0, 3), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(3), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
test(vmap(get_op(0), in_dims=(0, None)),
(torch.rand(B1, 2), torch.rand(B0, 2)), in_dims=(None, 0))
test(vmap(get_op(0), in_dims=(0, 0)),
(torch.rand(B1, 2), torch.rand(B0, B1, 2)), in_dims=(None, 0))
def test_slice(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda t: t[0:1], (torch.rand(B0, 3, 5),))
test(lambda t: t[:, 1:3], (torch.rand(3, 5, B0),), in_dims=2)
test(vmap(lambda t: t[:, 0:1], in_dims=2), (torch.rand(3, 5, B0, B1),), in_dims=2)
test(vmap(vmap(lambda t: t[0:1], in_dims=2), in_dims=2),
(torch.rand(3, 5, B0, B1, B2),), in_dims=2)
def test_squeeze(self):
test = self._vmap_view_test
op = torch.squeeze
B0, B1 = 1, 11
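# Note (presumed intent): B0 = 1 makes the batch dim itself a size-1 squeeze
# candidate; the batching rule must leave it intact.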
test(op, (torch.rand(B0),))
test(op, (torch.rand(B0, 3, 5),))
test(op, (torch.rand(1, B0, 5),), in_dims=1)
test(op, (torch.rand(B0, 0, 1, 5, 1),))
test(op, (torch.rand(B0, 1, 1, 1, 1),))
test(vmap(op), (torch.rand(B0, B1, 1),))
test(vmap(op), (torch.rand(B1, 1, B0),), in_dims=2)
def test_sum_dim(self):
test = self._vmap_test
B0, B1 = 5, 7
# Single vmap, various in_dims / out_dims
test(lambda x: x.sum(()), [torch.randn([B0])])
test(lambda x: x.sum(()), [torch.randn([B0, 2])])
test(lambda x: x.sum(0), [torch.randn([B0])])
test(lambda x: x.sum(-1), [torch.randn([B0])])
test(lambda x: x.sum(0), [torch.randn([B0, 3])])
test(lambda x: x.sum(-1), [torch.randn([2, 5, B0, 3])], in_dims=2)
test(lambda x: x.sum(2), [torch.randn([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(lambda x: x.sum(())), [torch.randn([B0, B1])])
test(vmap(lambda x: x.sum(0)), [torch.randn([B0, B1])])
test(vmap(lambda x: x.sum(-1)), [torch.randn([B0, B1])])
test(vmap(lambda x: x.sum(-2)), [torch.randn([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(lambda x: x.sum(2), in_dims=2), [torch.randn([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
def test_reshape(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.reshape
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None), check_view=True)
test(op, (torch.rand(2, B0, 5), [1, 1, 10]), in_dims=(1, None), check_view=False)
test(vmap(lambda t: t.reshape([-1])), (torch.rand(B0, B1, 2, 5),), check_view=True)
test(vmap(vmap(lambda t: t.reshape([-1]), in_dims=2), in_dims=1),
(torch.rand(3, B1, 2, B2, 5, B0),), in_dims=5, check_view=False)
def test_reshape_as(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.reshape_as
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)), check_view=True)
test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0), check_view=True)
test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None), check_view=True)
test(op, (torch.rand(2, B0, 5), torch.rand(1, 1, 10)), in_dims=(1, None), check_view=False)
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)), check_view=True)
test(vmap(vmap(op, in_dims=(2, None)), in_dims=(1, None)),
(torch.rand(3, B1, 2, B2, 5, B0), torch.rand(B0, 3 * 2 * 5)),
in_dims=(5, 0), check_view=False)
def test_result_type(self):
def scalar_tensor_with_dtype(op):
def wrapped(*args, **kwargs):
dtype = op(*args, **kwargs)
return torch.ones([], dtype=dtype)
return wrapped
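# torch.result_type returns a dtype rather than a Tensor, and vmap requires
# tensor outputs, so the wrapper materializes the promoted dtype as a scalar
# tensor for the usual _vmap_test harness.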
test = self._vmap_test
op = scalar_tensor_with_dtype(torch.result_type)
B0 = 2
test(op, (torch.randn(B0), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False)
test(lambda x: op(x, 1), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0),),
check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0),), check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randn(B0, 2, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randint(10, [B0, 2], dtype=torch.int64)),
check_propagates_grad=False)
test(lambda x: op(x, 1), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0, 2),),
check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0, 2),), check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False)
def test_tensor_split(self):
test = self._vmap_view_test
op = torch.tensor_split
B0, B1, B2 = 7, 11, 13
# tests for torch.tensor_split(self, indices_or_sections: int, dim)
test(op, (torch.rand(B0, 2, 1024), 5, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 150, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 256, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
# tests for torch.tensor_split(self, indices_or_sections: List[int], dim)
test(op, (torch.rand(B0, 2, 1024), [50, 100, 378, 890], -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), [50, 100, 212, 345, 0, 378, 890], 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), [50, 100, 212, 345, 0, 378, 890], 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, [4, 8, 9, 34, 29], 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_split(self):
test = self._vmap_view_test
op = torch.split
B0, B1, B2 = 7, 11, 13
# tests for torch.split(self, split_size: int, dim)
test(op, (torch.rand(B0, 2, 1024), 101, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 130, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 256, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
# tests for torch.split(self, split_size: List[int], dim)
test(op, (torch.rand(B0, 2, 1024), [1, 1020, 3], -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), [100] * 10 + [24], 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), [256] * 3 + [255], 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, [4] * 8 + [8] * 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_trace(self):
op = torch.trace
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5),))
test(op, (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
def test_transpose(self):
op = torch.transpose
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda x: op(x, 0, 1), (torch.rand(B0, 2, 5),))
test(lambda x: op(x, -1, -2), (torch.rand(B0, 2, 5),))
test(lambda x: op(x, 3, 1), (torch.rand(B0, 2, 5, 4, 6),))
test(lambda x: op(x, 1, 0), (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(lambda x: op(x, 0, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(lambda x: op(x, 0, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
# Special case: scalar tensor
for dim1, dim2 in itertools.product([0, -1], [0, -1]):
x = torch.rand(B0)
result = vmap(lambda x: op(x, dim1, dim2))(x)
self.assertTrue(result is x)
def test_t(self):
op = torch.t
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5),))
test(op, (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
def test_T_numpy(self):
def op(t):
return t.T
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 3, 5),))
test(op, (torch.rand(2, B0, 3, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(op), (torch.rand(B1, 2, B0, 3, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 3, B2, 5),), in_dims=2)
def test_to(self):
test = self._vmap_test
B0, B1 = 7, 11
test(lambda t: t.to('cpu'), (torch.rand(B0),))
test(lambda t: t.to(torch.double), (torch.rand(B0),))
test(lambda t, o: t.to(o), (torch.rand(B0), torch.randn(B0, dtype=torch.float64)))
test(lambda t, o: t.to(o),
(torch.rand(B0), torch.randn(B0, dtype=torch.float64)),
in_dims=(0, None))
test(vmap(lambda t: t.to(torch.double)), (torch.rand(B0, B1, 3),))
# also test some casting methods
test(lambda t: t.double(), (torch.rand(B0),))
test(lambda t: t.float(), (torch.rand(B0),))
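# integer casts can't carry autograd history, so the grad-propagation check
# is skipped for them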
test(lambda t: t.int(), (torch.rand(B0),), check_propagates_grad=False)
test(lambda t: t.long(), (torch.rand(B0),), check_propagates_grad=False)
def test_unfold(self):
op = torch.Tensor.unfold
test = self._vmap_view_test
B0, B1, B2 = 3, 2, 5
test(op, (torch.rand(B0, 7, 11), 0, 2, 1), in_dims=(0, None, None, None))
test(op, (torch.rand(7, B0, 11), 1, 4, 2), in_dims=(1, None, None, None))
test(vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 7, B0, 11), 1, 5, 1), in_dims=(2, None, None, None))
test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
(torch.rand(B1, 7, B0, 11, B2), -1, 2, 4), in_dims=(2, None, None, None))
def test_unbind(self):
test = self._vmap_view_test
op = torch.unbind
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 1024), -1), in_dims=(0, None))
test(op, (torch.rand(B0, 2, 0),))
test(op, (torch.rand(2, B0, 7), 0), in_dims=(1, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, 1023, B0, 5), 1),
in_dims=(2, None))
test(vmap(vmap(lambda t: op(t, dim=1), in_dims=2)),
(torch.rand(B1, 2, B0, 32, B2),), in_dims=2)
def test_view(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, None))(torch.rand(2, B0, 5), [10])
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), [1, 2, 1, 10]), in_dims=(0, None))
test(vmap(lambda t: t.view([-1])), (torch.rand(B0, B1, 2, 5, 3),))
test(vmap(vmap(lambda t: t.reshape([-1])), in_dims=1),
(torch.rand(B2, B0, B1, 3, 2, 5),), in_dims=1)
def test_view_as(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view_as
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, 0))(torch.rand(2, B0, 5), torch.rand(B0, 10))
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)))
test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0))
test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), torch.rand(2, 1, 1, 10)), in_dims=(0, None))
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)))
test(vmap(vmap(op, in_dims=(0, None)), in_dims=(0, None)),
(torch.rand(B1, B2, B0, 3, 2, 5), torch.rand(B0, 3 * 2 * 5)),
in_dims=(2, 0))
def test_no_random_op_support(self):
B0 = 2
captured = torch.rand(3)
random_ops = [
# out-of-place on BatchedTensor
(torch.bernoulli, (torch.rand(B0, 1),)),
(lambda t: torch.bernoulli(t, p=0.5), (torch.rand(B0, 1),)),
(lambda t: torch.multinomial(t, 2), (torch.rand(B0, 3),)),
(torch.normal, (torch.randn(B0, 1), torch.randn(B0, 1))),
(lambda t: torch.normal(t, 1.), (torch.randn(B0, 1),)),
(lambda t: torch.normal(0., t), (torch.randn(B0, 1),)),
(torch.poisson, (torch.rand(B0, 1),)),
(torch.rand_like, (torch.rand(B0, 1),)),
(torch.randn_like, (torch.rand(B0, 1),)),
(lambda t: torch.randint_like(t, 2), (torch.rand(B0, 1),)),
(lambda t: torch.randint_like(t, 0, 2), (torch.rand(B0, 1),)),
# out-of-place on captured tensor
(lambda t: torch.bernoulli(captured), (torch.rand(B0),)),
(lambda t: torch.bernoulli(captured, p=0.5), (torch.rand(B0),)),
(lambda t: torch.multinomial(captured, 2), (torch.rand(B0),)),
(lambda t: torch.normal(captured, captured), (torch.randn(B0),)),
(lambda t: torch.normal(captured, 1.), (torch.randn(B0),)),
(lambda t: torch.normal(0., captured), (torch.randn(B0),)),
(lambda t: torch.poisson(captured), (torch.rand(B0),)),
(lambda t: torch.rand_like(captured), (torch.rand(B0),)),
(lambda t: torch.randn_like(captured), (torch.rand(B0),)),
(lambda t: torch.randint_like(captured, 2), (torch.rand(B0),)),
(lambda t: torch.randint_like(captured, 0, 2), (torch.rand(B0),)),
# in-place on BatchedTensor
(lambda t: t.bernoulli_(), (torch.randn(B0, 1),)),
(lambda t: t.cauchy_(), (torch.randn(B0, 1),)),
(lambda t: t.exponential_(), (torch.randn(B0, 1),)),
(lambda t: t.geometric_(0.5), (torch.randn(B0, 1),)),
(lambda t: t.log_normal_(), (torch.randn(B0, 1),)),
(lambda t: t.normal_(), (torch.randn(B0, 1),)),
(lambda t: t.random_(), (torch.randn(B0, 1),)),
(lambda t: t.random_(0, 2), (torch.randn(B0, 1),)),
(lambda t: t.random_(2), (torch.randn(B0, 1),)),
(lambda t: t.uniform_(), (torch.randn(B0, 1),)),
# in-place on captured tensor
(lambda t: captured.bernoulli_(), (torch.randn(B0),)),
(lambda t: captured.cauchy_(), (torch.randn(B0),)),
(lambda t: captured.exponential_(), (torch.randn(B0),)),
(lambda t: captured.geometric_(0.5), (torch.randn(B0),)),
(lambda t: captured.log_normal_(), (torch.randn(B0),)),
(lambda t: captured.normal_(), (torch.randn(B0),)),
(lambda t: captured.random_(), (torch.randn(B0),)),
(lambda t: captured.random_(0, 2), (torch.randn(B0),)),
(lambda t: captured.random_(2), (torch.randn(B0),)),
(lambda t: captured.uniform_(), (torch.randn(B0),)),
# factory functions
(lambda t: torch.rand(1), (torch.randn(B0),)),
(lambda t: torch.randn(1), (torch.randn(B0),)),
(lambda t: torch.randint(5, [1]), (torch.randn(B0),)),
(lambda t: torch.randperm(5), (torch.randn(B0),)),
]
for op, args in random_ops:
with self.assertRaisesRegex(RuntimeError,
'vmap: We do not yet support calling random operations'):
vmap(op)(*args)
def allowVmapFallbackUsage(fn):
fn._allow_vmap_fallback_usage = True
return fn
# All tests of TestVmapBaseLegacy check that the slow vmap fallback is never invoked.
# This is so that we can incrementally add batching rules for operators to
# replace the slow vmap fallback path for said operators. To skip this check,
# please use the allowVmapFallbackUsage decorator.
#
# NB: Don't add tests to TestVmapBaseLegacy directly, unless you want them to run
# on every subclass of TestVmapBaseLegacy. Add them to e.g. TestVmapOperators.
#
# NB: TestVmapBaseLegacy is a nested class. This prevents test runners from picking
# it up and running it.
class Namespace:
class TestVmapBaseLegacy(TestCase):
def __init__(self, method_name="runTest"):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is None:
return
if not should_allow_vmap_fallback_usage(test_method):
setattr(
self,
method_name,
self._wrap_method_with_vmap_fallback_check(test_method),
)
def _wrap_method_with_vmap_fallback_check(self, method):
msg = (
"Expected the test to not invoke the vmap fallback path, i.e., "
"all of the operators being tested in this test should have batching "
"rules implemented. If you are intentionally testing something to "
"do with the fallback path, use allowVmapFallbackUsage. Otherwise, "
"please make sure that batching rules are implemented for the "
"operator(s) being tested."
)
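# The wrapper below runs the test body with vmap fallback warnings enabled
# and fails if any captured warning matches FALLBACK_REGEX, i.e. if the slow
# fallback path was actually hit.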
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
with warnings.catch_warnings(record=True) as wa:
warnings.simplefilter("always")
with EnableVmapFallbackWarnings():
method(*args, **kwargs)
for captured_warning in wa:
self.assertNotRegex(
str(captured_warning.message), FALLBACK_REGEX, msg
)
return types.MethodType(wrapper, self)
@allowVmapFallbackUsage
def test_vmap_fallback_check_ok(self):
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
vmap(op_using_fallback)(torch.rand(3))
def test_vmap_fallback_check(self):
@self._wrap_method_with_vmap_fallback_check
def no_fallback(self):
pass
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
@self._wrap_method_with_vmap_fallback_check
def uses_fallback(self):
vmap(op_using_fallback)(torch.rand(3))
no_fallback(self)
with self.assertRaises(AssertionError):
uses_fallback(self)
class TestVmapOperatorsLegacy(Namespace.TestVmapBaseLegacy):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
def _vmap_view_test(self, *args, **kwargs):
self._vmap_test(*args, **kwargs, check_view=True)
def _test_unary(self, op, getter, device, *args, **kwargs):
test = functools.partial(self._vmap_test, *args, **kwargs)
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [getter([B0, 3], device)])
test(op, [getter([2, 5, B0, 3], device)], in_dims=2)
test(op, [getter([2, 5, B0, 3], device)], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [getter([B0, B1], device)])
test(vmap(op), [getter([B1, 2, 5, B0, 3], device)], in_dims=2)
test(
vmap(op, in_dims=2),
[getter([2, 5, B0, B1, 3], device)],
in_dims=2,
out_dims=2,
)
def test_unary_pointwise_ops(self):
cases = [
(torch.abs, TensorFactory.randn),
(torch.acos, TensorFactory.rand),
(torch.asin, TensorFactory.rand),
(torch.atan, TensorFactory.rand),
(torch.ceil, TensorFactory.randn),
(torch.cos, TensorFactory.rand),
(torch.cosh, TensorFactory.rand),
(torch.digamma, TensorFactory.rand),
(torch.exp, TensorFactory.randn),
(torch.expm1, TensorFactory.randn),
(torch.floor, TensorFactory.randn),
(torch.frac, TensorFactory.randn),
(torch.lgamma, TensorFactory.rand),
(torch.log, TensorFactory.randp1),
(torch.log10, TensorFactory.randp1),
(torch.log1p, TensorFactory.randp1),
(torch.log2, TensorFactory.randp1),
(torch.neg, TensorFactory.randn),
(torch.reciprocal, TensorFactory.randp1),
(torch.relu, TensorFactory.randn),
(torch.round, TensorFactory.randn),
(torch.rsqrt, TensorFactory.randp1),
(torch.sigmoid, TensorFactory.randn),
(torch.sign, TensorFactory.randn),
(torch.sin, TensorFactory.rand),
(torch.sinh, TensorFactory.rand),
(torch.sqrt, TensorFactory.rand),
(torch.tan, TensorFactory.rand),
(torch.tanh, TensorFactory.rand),
(torch.trunc, TensorFactory.randn),
]
for op, getter in cases:
self._test_unary(op, getter, "cpu")
def test_clone(self):
# Some basic tests
self._test_unary(lambda x: x.clone(), TensorFactory.randn, "cpu")
self._test_unary(
lambda x: x.clone(memory_format=torch.preserve_format),
TensorFactory.randn,
"cpu",
)
self._test_unary(
lambda x: x.clone(memory_format=torch.contiguous_format),
TensorFactory.randn,
"cpu",
)
# Test that the per-examples are contiguous when using torch.contiguous_format
def clone_contiguous(x):
return x.clone(memory_format=torch.contiguous_format)
B0, B1 = 3, 5
x = torch.randn(2, B0, 7)
y = vmap(clone_contiguous, in_dims=1, out_dims=1)(x)
self.assertTrue(y.movedim(1, 0).is_contiguous())
self.assertTrue(y[:, 0, :].is_contiguous())
x = torch.randn(2, B0, 7, B1)
y = vmap(vmap(clone_contiguous, in_dims=2), in_dims=1)(x)
self.assertTrue(y.is_contiguous())
self.assertTrue(y[0][0].is_contiguous())
msg = r"only supported with memory_format torch.preserve_format or torch.contiguous_format"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last))(torch.randn(B0))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last_3d))(
torch.randn(B0)
)
def test_binary_pointwise_ops(self):
def get_number(getter):
return getter([]).item()
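# get_number draws a Python scalar from the same factory as the tensor
# inputs, so the Tensor-Number overloads below see comparable values.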
def make_case(op, input_getter=TensorFactory.randn):
return (op, input_getter)
cases = [
# Basic arithmetic
make_case(torch.add),
make_case(lambda x, y: x + y),
make_case(torch.sub),
make_case(lambda x, y: x - y),
make_case(torch.mul),
make_case(lambda x, y: x * y),
make_case(torch.div, input_getter=TensorFactory.randp1),
make_case(lambda x, y: x / y, input_getter=TensorFactory.randp1),
make_case(torch.pow, input_getter=TensorFactory.randp1),
make_case(lambda x, y: x**y, input_getter=TensorFactory.randp1),
]
test = self._vmap_test
for op, getter in cases:
device = "cpu"
B0, B1 = 7, 11
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(
op,
(getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1),
out_dims=1,
)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(
op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(0, None)
)
# Nested vmap: op(Tensor, Tensor)
test(
vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device))
)
test(
vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)),
in_dims=(0, None),
)
# Python number overload: op(Tensor, Number) (and vice-versa)
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
number = get_number(getter)
self._test_unary(lambda t: op(number, t), getter, device)
# Type promotion: op(Logical Scalar Tensor, Logical Scalar Tensor)
test(op, (getter([B0], device), getter([B0], device, dtype=torch.double)))
test(op, (getter([B0], device, dtype=torch.double), getter([B0], device)))
test(op, (getter([B0], device), getter([B0], device)))
# Type promotion: op(Tensor, Logical Scalar Tensor) (and vice-versa)
test(op, (getter([B0, 2], device), getter([B0], device, torch.double)))
test(op, (getter([B0], device, torch.double), getter([B0, 2], device)))
if not torch.cuda.is_available():
continue
# TODO(rzou): fix the following
# # Test cross-device scalars
# number = get_number(getter)
# self._test_unary(lambda t: op(t, number), getter, device='cuda')
# self._test_unary(lambda t: op(number, t), getter, device='cuda')
# self._test_unary(lambda t: op(t, torch.tensor(number)), getter, device='cuda')
def test_as_strided(self):
def _test(sizes, strides, offset, tensor, lambd):
result = vmap(lambda t: t.as_strided(sizes, strides, offset))(tensor)
expected = vmap(lambd)(tensor)
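# as_strided under vmap should produce a view of the same underlying
# storage as the reference view op, hence the _base identity check.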
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
# single vmap test
B0 = 5
tensors = [
# contiguous
torch.randn(B0, 2, 3),
# non-contiguous
torch.randn(B0, 3, 2).transpose(1, 2),
# non-zero storage offset
torch.randn(2, B0, 2, 3)[1],
# non-contiguous strides, zero storage offset
torch.randn(B0, 2, 4, 3, 7)[:, :, 0, :, 0],
# non-contiguous strides, non-zero storage offset
torch.randn(B0, 2, 4, 3, 7)[:, :, 2, :, 1],
]
for x in tensors:
S0, S1 = x.stride()[1:]
offset = x.storage_offset()
# Broadcast
_test(
[5, 5, 2, 3], [0, 0, S0, S1], offset, x, lambda x: x.expand(5, 5, 2, 3)
)
# transpose
_test([3, 2], [S1, S0], offset, x, lambda x: x.transpose(0, 1))
# select
_test([2], [S0], offset + S1, x, lambda x: x[:, 1])
# Nested vmap test
B1 = 7
x = torch.randn(B1, B0, 2, 3)
S0, S1 = x.stride()[2:]
result = vmap(
vmap(lambda t: t.as_strided([5, 5, 2, 3], [0, 0, S0, S1])), in_dims=1
)(x)
expected = vmap(vmap(lambda t: t.expand(5, 5, 2, 3)), in_dims=1)(x)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
# Check that malformed size/strides raise a clean error instead of crashing
with self.assertRaisesRegex(
RuntimeError, "size and stride must have the same length"
):
x = torch.randn(B0, 2, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([1, 1, 1], [1, 1]))(x)
# Sanity check #1: we require the batch dims to be at the front of the
# tensor (in memory layout).
msg = "batch dims being vmapped over are at the front of the tensor"
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(2, B0, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([2, 3], [B0 * 3, 1]))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 2, 3, B1).movedim(3, 1)
vmap(vmap(lambda x: x.as_strided([2, 3], [B1 * 3, B1])))(x)
# All the Sanity check #2{a,b,c} cases check that
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# doesn't index memory that is out of bounds of xs[i]. This condition
# is important to the correctness of the as_strided batching rule
# (see NOTE: [When will the as_strided_batching_rule fail?])
# Sanity check #2a: The maximum indexable location of
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is less than or equal to the maximum indexable location of xs[i].
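# Concrete illustration (assumed values, not part of the original test):
# for contiguous x = torch.randn(B0, 3), x[i] occupies storage offsets
# [3*i, 3*i + 2]. A per-example as_strided([3], [1], 1 + 3*i) would touch
# offset 3*i + 3, one past the end of x[i], so the batching rule errors out.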
msg = "This is not supported inside of vmap"
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 3)
vmap(lambda x: x.as_strided([3], [1], 1))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 3, 5)
vmap(lambda x: x.as_strided([4, 4], [4, 1], 0))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, B1, 3, 5)
vmap(vmap(lambda x: x.as_strided([4, 4], [4, 1], 0)))(x)
# Sanity check #2b: The min indexable location of
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is greater than or equal to the min indexable location of xs[i].
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(2, B0, 3)[1]
vmap(lambda x: x.as_strided([3], [1], B0 * 3 - 1))(x)
# Sanity check #2c:
# xs[i] is a zero-dim tensor, but
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is not
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 0, 3)
vmap(lambda x: x.as_strided([3], [1]))(x)
def test_bmm(self):
op = torch.bmm
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 3, 3, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(2, 5, 3)), in_dims=(0, None))
test(
vmap(op, in_dims=(0, None)),
(torch.rand(B1, B0, 2, 3, 5), torch.rand(2, 5, 3)),
in_dims=(1, None),
)
# right arg is vmapped
test(op, (torch.rand(2, 5, 3), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(
vmap(op, in_dims=(None, 0)),
(torch.rand(2, 5, 3), torch.rand(B1, B0, 2, 3, 5)),
in_dims=(None, 1),
)
# both args are vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(B0, 2, 5, 3)))
test(
vmap(op),
(torch.rand(B1, B0, 2, 3, 5), torch.rand(B0, B1, 2, 5, 3)),
in_dims=(1, 0),
)
test(
vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 3, 5), torch.rand(B0, 2, 5, 3)),
in_dims=(None, 0),
)
def test_cat(self):
test = self._vmap_test
B0, B1 = 5, 7
# Quick hack b/c vmap can't accept a list of tensors as an argument
def get_op(dim):
def op(*tensors):
return torch.cat(tensors, dim=dim)
return op
test(get_op(0), (torch.rand(B0, 2), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(2), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(3, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(17, 2), torch.rand(17, 3, B0)), in_dims=(None, 2))
test(
vmap(get_op(0), in_dims=(0, None)),
(torch.rand(B1, 2), torch.rand(B0, 3)),
in_dims=(None, 0),
)
test(
vmap(get_op(0), in_dims=(0, 0)),
(torch.rand(B1, 2), torch.rand(B0, B1, 3)),
in_dims=(None, 0),
)
def test_conj(self):
op = torch.conj
def run_test(dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
B0, B1 = 7, 11
test = self._vmap_test
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3])])
test(op, [get([2, 5, B0, 3])], in_dims=2)
test(op, [get([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])], in_dims=2, out_dims=2)
# correctness tests
run_test(torch.float)
run_test(torch.cfloat)
# check that torch.conj on a non-complex tensor returns the same tensor
real_tensor = torch.randn(3)
result = vmap(op)(real_tensor)
self.assertEqual(result.data_ptr(), real_tensor.data_ptr())
def test_contiguous(self):
op = Tensor.contiguous
self._test_unary(op, TensorFactory.randn, "cpu")
# check that contiguous returns the original tensor if the per-examples
# are already contiguous
B0 = 3
x = torch.randn(B0, 2, 5, 7)
x = x.movedim(0, 2)
result = vmap(Tensor.contiguous, in_dims=2, out_dims=2)(x)
self.assertTrue(result is x)
msg = "NYI: querying is_contiguous inside of vmap for memory_format"
tensor = torch.randn(B0, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(op, memory_format=torch.channels_last))(tensor)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(op, memory_format=torch.channels_last_3d))(tensor)
def test_stride(self):
B0 = 3
x = torch.randn(B0, 2, 5, 7)
def foo(x):
assert x.stride() == (7 * 5, 7, 1)
return x
vmap(foo)(x)
x = torch.randn(2, B0, 5, 7).movedim(1, 0)
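# The per-example slices keep the strides of the original (2, B0, 5, 7)
# layout, so the size-2 dim steps over B0 * 5 * 7 elements.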
def bar(x):
assert x.stride() == (7 * 5 * B0, 7, 1)
return x
vmap(bar)(x)
def test_chunk(self):
test = self._vmap_view_test
op = torch.chunk
B0, B1, B2 = 7, 11, 13
# tests for torch.chunk(self, chunks: int, dim)
test(op, (torch.rand(B0, 2, 1024), 15, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 9, 1), in_dims=(1, None, None))
test(
vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 1023, B0, 5), 4, 0),
in_dims=(2, None, None),
)
test(
vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),),
in_dims=2,
)
def test_clamp(self):
clamp_cases = (
(lambda t: t.clamp(min=-0.5), TensorFactory.randn),
(lambda t: t.clamp(max=0.5), TensorFactory.randn),
(lambda t: t.clamp(min=-0.5, max=0.5), TensorFactory.randn),
(lambda t: t.clamp_min(min=-0.5), TensorFactory.randn),
(lambda t: t.clamp_max(max=0.5), TensorFactory.randn),
)
for op, getter in clamp_cases:
self._test_unary(op, getter, "cpu")
def test_comparison_ops(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
getter = TensorFactory.randn
B0, B1 = 7, 11
ops = (
torch.eq,
lambda x, y: x == y,
torch.gt,
lambda x, y: x > y,
torch.ge,
lambda x, y: x >= y,
torch.le,
lambda x, y: x <= y,
torch.lt,
lambda x, y: x < y,
torch.ne,
lambda x, y: x != y,
)
for op in ops:
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3]), getter([B0, 3])))
test(op, (getter([B0]), getter([B0, 2, 3])))
test(op, (getter([B0]), getter([2, B0, 3])), in_dims=(0, 1))
test(op, (getter([B0]), getter([2, B0, 3])), in_dims=(0, 1), out_dims=1)
test(op, (getter([B0]), getter([2, 3])), in_dims=(0, None))
test(op, (getter([2, 3]), getter([B0, 3])), in_dims=(0, None))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3]), getter([B0, B1, 3])))
test(
vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3]), getter([B1, 3])),
in_dims=(0, None),
)
# test number as inputs
number = getter([]).item()
self._test_unary(
lambda t: op(t, number), getter, "cpu", check_propagates_grad=False
)
def test_diagonal(self):
tensor = torch.randn(3, 5, 7, 11, 13)
test = self._vmap_view_test
op = torch.diagonal
test(op, (tensor, 1, 0, 1), in_dims=(0, None, None, None))
test(op, (tensor, 0, 2, -1), in_dims=(0, None, None, None))
test(op, (tensor, 2, 1, 2), in_dims=(1, None, None, None))
test(op, (tensor, 0, -2, -1), in_dims=(1, None, None, None), out_dims=1)
test(vmap(lambda t: op(t, 0, 0, -1)), (tensor,), in_dims=1, out_dims=1)
test(
vmap(vmap(lambda t: op(t, 0, 0, 1), in_dims=1), in_dims=3),
(tensor,),
in_dims=1,
out_dims=1,
)
def test_dot(self):
op = torch.dot
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 5), torch.rand(5)), in_dims=(0, None))
test(
vmap(op, in_dims=(0, None)),
(torch.rand(B1, B0, 5), torch.rand(5)),
in_dims=(1, None),
)
# right arg is vmapped
test(op, (torch.rand(5), torch.rand(B0, 5)), in_dims=(None, 0))
test(
vmap(op, in_dims=(None, 0)),
(torch.rand(5), torch.rand(B1, B0, 5)),
in_dims=(None, 1),
)
# both args are vmapped
test(op, (torch.rand(B0, 5), torch.rand(B0, 5)))
test(vmap(op), (torch.rand(B1, B0, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0))
test(
vmap(op, in_dims=(0, None)),
(torch.rand(B1, 5), torch.rand(B0, 5)),
in_dims=(None, 0),
)
def test_expand_as(self):
op = torch.Tensor.expand_as
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 1, 5), torch.rand(B0, 2, 3, 5)))
test(op, (torch.rand(B0, 1, 5), torch.rand(2, 3, 5)), in_dims=(0, None))
test(op, (torch.rand(1, 5), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B0, B1, 2, 3, 5)))
test(
vmap(op),
(torch.rand(B0, B1, 1, 5), torch.rand(B1, B0, 2, 3, 5)),
in_dims=(0, 1),
)
test(vmap(op), (torch.rand(B0, B1), torch.rand(B1, 2, 3, 5)), in_dims=(0, None))
test(vmap(vmap(op)), (torch.rand(B0, B1, B2), torch.rand(B0, B1, B2, 2, 3, 5)))
def test_fill_and_zero_inplace(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B0, B1 = 7, 11
ops = (
lambda t: t.fill_(0.1),
lambda t: t.fill_(torch.tensor(0.2)),
lambda t: t.zero_(),
)
for op in ops:
# Single vmap, various in_dims / out_dims
test(op, [TensorFactory.randn([B0, 3])])
test(op, [TensorFactory.randn([2, 5, B0, 3])], in_dims=2)
test(op, [TensorFactory.randn([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [TensorFactory.randn([B0, B1])])
test(vmap(op), [TensorFactory.randn([B1, 2, 5, B0, 3])], in_dims=2)
test(
vmap(op, in_dims=2),
[TensorFactory.randn([2, 5, B0, B1, 3])],
in_dims=2,
out_dims=2,
)
# test when value is a batched tensor for fill_ operator
B0, B1 = 3, 5
test(Tensor.fill_, [TensorFactory.randn([B0, B1]), TensorFactory.randn(B0)])
with self.assertRaisesRegex(
RuntimeError, r"output with shape .+ doesn't match the broadcast shape"
):
# RuntimeError is raised when the tensor being written to isn't being vmapped over
vmap(Tensor.fill_, (None, 0))(
TensorFactory.randn([B0, B1]), TensorFactory.randn([B0])
)
def _test_complex_views(self, op, dtypes):
test = self._vmap_view_test
def run_test(op, dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3])])
test(op, [get([3, B0])], in_dims=1)
test(op, [get([2, 5, B0, 3])], in_dims=2)
test(op, [get([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, 3, B0])], in_dims=4)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])], in_dims=2, out_dims=2)
for dtype in dtypes:
run_test(op, dtype)
def test_real(self):
self._test_complex_views(torch.real, dtypes=[torch.cfloat, torch.cdouble])
def test_imag(self):
self._test_complex_views(torch.imag, dtypes=[torch.cfloat, torch.cdouble])
def test_view_as_real(self):
self._test_complex_views(
torch.view_as_real, dtypes=[torch.cfloat, torch.cdouble]
)
def test_view_as_complex(self):
def run_test(dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
op = torch.view_as_complex
test = self._vmap_view_test
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3, 2])])
test(op, [get([2, 5, B0, 3, 2])], in_dims=2)
test(op, [get([2, 5, B0, 3, 2])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1, 2])])
test(vmap(op), [get([B1, 2, 5, B0, 3, 2])], in_dims=2)
test(
vmap(op, in_dims=2), [get([2, 5, B0, B1, 3, 2])], in_dims=2, out_dims=2
)
# Interesting case #1: Batch dim directly before dim of size 2
test(op, [get([3, B0, 2])], in_dims=1)
test(vmap(op, in_dims=1), [get([3, B1, B0, 2])], in_dims=2)
# Interesting case #2: Batch dim at end of tensor, success cases
# view_as_complex requires that the dim with size 2 have stride 1
# in order for the view to function properly
test(op, [get([B0, 2]).transpose(0, 1)], in_dims=1)
test(vmap(op, in_dims=1), [get([B0, B1, 2]).movedim(1, 2)])
test(vmap(op, in_dims=2), [get([B0, 3, B1, 2]).movedim(2, 3)])
# Interesting case #3: Batch dim at end of tensor, failure cases
msg = "Tensor must have a last dimension with stride 1"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=1)(get([2, B0]))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=1), in_dims=1)(get([2, B0, B1]))
# Invalid input: no dimension of size 2
msg = "Input tensor must have one or more dimensions"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(get([B0]))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op))(get([B0, B1]))
# Invalid input: Batch dim has size 2, but the logical last dim does
# not have size 2
msg = "Tensor must have a last dimension of size 2"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=1)(get([3, 2]))
for dtype in [torch.float, torch.double]:
run_test(dtype)
def test_is_complex(self):
ctensor = torch.randn(3, dtype=torch.cfloat)
tensor = torch.randn(3)
def foo(x):
if x.is_complex():
return torch.tensor(1)
else:
return torch.tensor(0)
self.assertEqual(vmap(foo)(ctensor), torch.tensor([1, 1, 1]))
self.assertEqual(vmap(foo)(tensor), torch.tensor([0, 0, 0]))
def test_is_floating_point(self):
float_tensor = torch.tensor([1.0, 2.0, 3.0])
long_tensor = torch.tensor([1, 2, 3])
def foo(x):
if x.is_floating_point():
return torch.tensor(1)
else:
return torch.tensor(0)
self.assertEqual(vmap(foo)(float_tensor), torch.tensor([1, 1, 1]))
self.assertEqual(vmap(foo)(long_tensor), torch.tensor([0, 0, 0]))
def test_is_contiguous(self):
def foo(x):
if x.is_contiguous():
return torch.tensor(1.0)
else:
return torch.tensor(0.0)
B0, B1 = 3, 5
# Single batch dim
contig = torch.randn(B0, 2, 7)
self.assertEqual(vmap(foo)(contig), torch.ones(B0))
noncontig = torch.randn(2, B0, 7)
self.assertEqual(vmap(foo, in_dims=1)(noncontig), torch.zeros(B0))
noncontig = torch.randn(2, B0, 7).movedim(1, 0)
self.assertEqual(vmap(foo)(noncontig), torch.zeros(B0))
noncontig = torch.randn(2, 7, B0)
self.assertEqual(vmap(foo, in_dims=2)(noncontig), torch.zeros(B0))
# Multiple batch dims
contig = torch.randn(B0, B1, 3)
self.assertEqual(vmap(vmap(foo))(contig), torch.ones(B0, B1))
contig = torch.randn(B1, B0, 3)
self.assertEqual(vmap(vmap(foo), in_dims=1)(contig), torch.ones(B0, B1))
contig = torch.randn(B1, B0, 3).movedim(0, 1)
self.assertEqual(vmap(vmap(foo))(contig), torch.ones(B0, B1))
noncontig = torch.randn(B0, 3, B1)
self.assertEqual(vmap(vmap(foo, in_dims=1))(noncontig), torch.zeros(B0, B1))
# is_contiguous on empty tensor is True
def bar(x):
assert x.is_contiguous()
return x
vmap(bar)(torch.randn(B0, 0, 3))
vmap(bar, in_dims=1)(torch.randn(0, B0, 3))
vmap(bar)(torch.randn(B0, 0, 3).mT)
# is_contiguous with other memory formats
def baz(x, memory_format):
x.is_contiguous(memory_format=memory_format)
return x
msg = "NYI: querying is_contiguous inside of vmap for memory_format"
tensor = torch.randn(B0, 2, 7, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(baz, memory_format=torch.channels_last))(tensor)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(baz, memory_format=torch.channels_last_3d))(tensor)
def test_movedim(self):
op = torch.movedim
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
# movedim(tensor, int, int) variant
test(op, (torch.rand(B0, 2, 5), 0, 1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 0, 1), in_dims=(1, None, None))
test(
vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5), 0, 1),
in_dims=(2, None, None),
)
test(
vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), 0, 1),
in_dims=(2, None, None),
)
# movedim(tensor, intlist, intlist) variant
test(op, (torch.rand(B0, 2, 3, 5), [1, 0], [0, 2]), in_dims=(0, None, None))
test(op, (torch.rand(2, 3, B0, 5), [1, 0], [0, 2]), in_dims=(1, None, None))
test(
vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5), [0, 1], [1, 0]),
in_dims=(2, None, None),
)
test(
vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), [0, 1], [1, 0]),
in_dims=(2, None, None),
)
def test_mm(self):
op = torch.mm
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5, 2)), in_dims=(0, None))
test(
vmap(op, in_dims=(0, None)),
(torch.rand(B1, B0, 2, 5), torch.rand(5, 2)),
in_dims=(1, None),
)
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5, 2)), in_dims=(None, 0))
test(
vmap(op, in_dims=(None, 0)),
(torch.rand(2, 5), torch.rand(B1, B0, 5, 2)),
in_dims=(None, 1),
)
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5, 2)))
test(
vmap(op),
(torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5, 2)),
in_dims=(1, 0),
)
test(
vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 5), torch.rand(B0, 5, 2)),
in_dims=(None, 0),
)
def test_mv(self):
op = torch.mv
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5)), in_dims=(0, None))
test(
vmap(op, in_dims=(0, None)),
(torch.rand(B1, B0, 2, 5), torch.rand(5)),
in_dims=(1, None),
)
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5)), in_dims=(None, 0))
test(
vmap(op, in_dims=(None, 0)),
(torch.rand(2, 5), torch.rand(B1, B0, 5)),
in_dims=(None, 1),
)
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5)))
test(
vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0)
)
test(
vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 5), torch.rand(B0, 5)),
in_dims=(None, 0),
)
def test_narrow(self):
op = torch.narrow
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), -1, 1, 3), in_dims=(0, None, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1, 3), in_dims=(1, None, None, None))
test(
vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 2, B0, 5), 1, 0, 0),
in_dims=(2, None, None, None),
)
test(
vmap(
vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)
),
(torch.rand(B1, 2, B0, 5, B2), -1, 2, 3),
in_dims=(2, None, None, None),
)
def test_new_empty(self):
# Empty is non-deterministic so we just check that the shape of the
# output tensor is what we expect and that the vmap fallback isn't used.
op = Tensor.new_empty
B0, B1 = 7, 11
result = vmap(lambda x: op(x, [2, 3]))(torch.randn(B0))
self.assertEqual(result.shape, [B0, 2, 3])
result = vmap(lambda x: op(x, []))(torch.randn(B0))
self.assertEqual(result.shape, [B0])
result = vmap(vmap(lambda x: op(x, [2, 3])))(torch.randn(B0, B1))
self.assertEqual(result.shape, [B0, B1, 2, 3])
def test_new_empty_strided(self):
# Empty is non-deterministic so we just check that the shape and strides
# of the output are what we expect and that the vmap fallback isn't used
B0, B1 = 7, 11
def _test_single_vmap(size, stride, B0):
x = torch.randn(B0)
result = vmap(lambda x: x.new_empty_strided(size, stride))(x)
S = torch.empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0] + size)
self.assertEqual(result.stride(), [S] + stride)
def _test_double_vmap(size, stride, B0, B1):
x = torch.randn(B0, B1)
result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)))(x)
S = torch.empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0, B1] + size)
self.assertEqual(result.stride(), [B1 * S, S] + stride)
x = torch.randn(B1, B0)
result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)), in_dims=1)(
x
)
S = x.new_empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0, B1] + size)
self.assertEqual(result.stride(), [B1 * S, S] + stride)
# contiguous case
_test_single_vmap([2, 3, 5], [3 * 5, 5, 1], B0)
_test_double_vmap([2, 3, 5], [3 * 5, 5, 1], B0, B1)
# expanded
_test_single_vmap([2, 3, 5], [0, 5, 1], B0)
_test_double_vmap([2, 3, 5], [0, 5, 1], B0, B1)
# some of these cases are pretty strange, just verifying that if
# empty_strided allows them then BatchedTensor.new_empty_strided
# can as well
for shape in [[2, 3, 4], [0, 2, 0]]:
for strides in [[12, 4, 1], [2, 4, 6], [0, 0, 0]]:
_test_single_vmap(shape, strides, B0)
_test_double_vmap(shape, strides, B0, B1)
def test_new_zeros(self):
op = Tensor.new_zeros
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B0, B1 = 7, 11
test(lambda x: op(x, 2, 3), (torch.rand(B0),))
test(lambda x: op(x, []), (torch.rand(B0),))
test(vmap(lambda x: op(x, 3, 5)), (torch.rand(B0, B1),))
def test_select(self):
op = torch.select
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), 0, 0), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1), in_dims=(1, None, None))
test(vmap(lambda t: op(t, 1, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(
vmap(vmap(lambda t: op(t, 1, 1), in_dims=1)),
(torch.rand(B1, 2, B0, B2, 5),),
in_dims=2,
)
def test_stack(self):
test = self._vmap_test
B0, B1 = 5, 7
# Quick hack b/c vmap can't accept a list of tensors as an argument
def get_op(dim):
def op(*tensors):
return torch.stack(tensors, dim=dim)
return op
test(get_op(0), (torch.rand(B0, 3), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(3), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
test(
vmap(get_op(0), in_dims=(0, None)),
(torch.rand(B1, 2), torch.rand(B0, 2)),
in_dims=(None, 0),
)
test(
vmap(get_op(0), in_dims=(0, 0)),
(torch.rand(B1, 2), torch.rand(B0, B1, 2)),
in_dims=(None, 0),
)
def test_slice(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda t: t[0:1], (torch.rand(B0, 3, 5),))
test(lambda t: t[:, 1:3], (torch.rand(3, 5, B0),), in_dims=2)
test(
vmap(lambda t: t[:, 0:1], in_dims=2), (torch.rand(3, 5, B0, B1),), in_dims=2
)
test(
vmap(vmap(lambda t: t[0:1], in_dims=2), in_dims=2),
(torch.rand(3, 5, B0, B1, B2),),
in_dims=2,
)
def test_squeeze(self):
test = self._vmap_view_test
op = torch.squeeze
B0, B1 = 1, 11
test(op, (torch.rand(B0),))
test(op, (torch.rand(B0, 3, 5),))
test(op, (torch.rand(1, B0, 5),), in_dims=1)
test(op, (torch.rand(B0, 0, 1, 5, 1),))
test(op, (torch.rand(B0, 1, 1, 1, 1),))
test(vmap(op), (torch.rand(B0, B1, 1),))
test(vmap(op), (torch.rand(B1, 1, B0),), in_dims=2)
def test_sum_dim(self):
test = self._vmap_test
B0, B1 = 5, 7
# Single vmap, various in_dims / out_dims
test(lambda x: x.sum(()), [torch.randn([B0])])
test(lambda x: x.sum(()), [torch.randn([B0, 2])])
test(lambda x: x.sum(0), [torch.randn([B0])])
test(lambda x: x.sum(-1), [torch.randn([B0])])
test(lambda x: x.sum(0), [torch.randn([B0, 3])])
test(lambda x: x.sum(-1), [torch.randn([2, 5, B0, 3])], in_dims=2)
test(lambda x: x.sum(2), [torch.randn([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(lambda x: x.sum(())), [torch.randn([B0, B1])])
test(vmap(lambda x: x.sum(0)), [torch.randn([B0, B1])])
test(vmap(lambda x: x.sum(-1)), [torch.randn([B0, B1])])
test(vmap(lambda x: x.sum(-2)), [torch.randn([B1, 2, 5, B0, 3])], in_dims=2)
test(
vmap(lambda x: x.sum(2), in_dims=2),
[torch.randn([2, 5, B0, B1, 3])],
in_dims=2,
out_dims=2,
)
def test_reshape(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.reshape
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None), check_view=True)
test(
op, (torch.rand(2, B0, 5), [1, 1, 10]), in_dims=(1, None), check_view=False
)
test(
vmap(lambda t: t.reshape([-1])),
(torch.rand(B0, B1, 2, 5),),
check_view=True,
)
test(
vmap(vmap(lambda t: t.reshape([-1]), in_dims=2), in_dims=1),
(torch.rand(3, B1, 2, B2, 5, B0),),
in_dims=5,
check_view=False,
)
def test_reshape_as(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.reshape_as
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)), check_view=True)
test(
op,
(torch.rand(2 * 5), torch.rand(B0, 2, 5)),
in_dims=(None, 0),
check_view=True,
)
test(
op,
(torch.rand(B0, 2 * 5), torch.rand(2, 5)),
in_dims=(0, None),
check_view=True,
)
test(
op,
(torch.rand(2, B0, 5), torch.rand(1, 1, 10)),
in_dims=(1, None),
check_view=False,
)
test(
vmap(op),
(torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)),
check_view=True,
)
test(
vmap(vmap(op, in_dims=(2, None)), in_dims=(1, None)),
(torch.rand(3, B1, 2, B2, 5, B0), torch.rand(B0, 3 * 2 * 5)),
in_dims=(5, 0),
check_view=False,
)
def test_result_type(self):
def scalar_tensor_with_dtype(op):
def wrapped(*args, **kwargs):
dtype = op(*args, **kwargs)
return torch.ones([], dtype=dtype)
return wrapped
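        # Descriptive note (editor's addition): torch.result_type returns a torch.dtype rather
        # than a Tensor, so the wrapper above materializes the result as a scalar tensor of
        # that dtype, letting the Tensor-comparing _vmap_test helper be reused unchanged.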
test = self._vmap_test
op = scalar_tensor_with_dtype(torch.result_type)
B0 = 2
test(
op,
(torch.randn(B0), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False,
)
test(
op,
(torch.randn(B0), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False,
)
test(lambda x: op(x, 1), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0),), check_propagates_grad=False)
test(
lambda x: op(x, torch.tensor(1)),
(torch.randn(B0),),
check_propagates_grad=False,
)
test(
lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0),),
check_propagates_grad=False,
)
test(
op,
(torch.randn(B0, 2), torch.randn(B0, 2, dtype=torch.float64)),
check_propagates_grad=False,
)
test(
op,
(torch.randn(B0, 2), torch.randint(10, [B0, 2], dtype=torch.int64)),
check_propagates_grad=False,
)
test(lambda x: op(x, 1), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0, 2),), check_propagates_grad=False)
test(
lambda x: op(x, torch.tensor(1)),
(torch.randn(B0, 2),),
check_propagates_grad=False,
)
test(
lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0, 2),),
check_propagates_grad=False,
)
test(
op,
(torch.randn(B0, 2), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False,
)
test(
op,
(torch.randn(B0, 2), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False,
)
@skipIfTorchDynamo("too slow")
def test_tensor_split(self):
test = self._vmap_view_test
op = torch.tensor_split
B0, B1, B2 = 7, 11, 13
# tests for torch.tensor_split(self, indices_or_sections: int, dim)
test(op, (torch.rand(B0, 2, 1024), 5, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 150, 1), in_dims=(1, None, None))
test(
vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 1023, B0, 5), 256, 0),
in_dims=(2, None, None),
)
test(
vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),),
in_dims=2,
)
# tests for torch.tensor_split(self, indices_or_sections: List[int], dim)
test(
op,
(torch.rand(B0, 2, 1024), [50, 100, 378, 890], -1),
in_dims=(0, None, None),
)
test(
op,
(torch.rand(2, B0, 1024), [50, 100, 212, 345, 0, 378, 890], 1),
in_dims=(1, None, None),
)
test(
vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 1023, B0, 5), [50, 100, 212, 345, 0, 378, 890], 0),
in_dims=(2, None, None),
)
test(
vmap(vmap(lambda t: op(t, [4, 8, 9, 34, 29], 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),),
in_dims=2,
)
def test_split(self):
test = self._vmap_view_test
op = torch.split
B0, B1, B2 = 7, 11, 13
# tests for torch.split(self, split_size: int, dim)
test(op, (torch.rand(B0, 2, 1024), 101, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 130, 1), in_dims=(1, None, None))
test(
vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 1023, B0, 5), 256, 0),
in_dims=(2, None, None),
)
test(
vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),),
in_dims=2,
)
# tests for torch.split(self, split_size: List[int], dim)
test(op, (torch.rand(B0, 2, 1024), [1, 1020, 3], -1), in_dims=(0, None, None))
test(
op, (torch.rand(2, B0, 1024), [100] * 10 + [24], 1), in_dims=(1, None, None)
)
test(
vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 1023, B0, 5), [256] * 3 + [255], 0),
in_dims=(2, None, None),
)
test(
vmap(vmap(lambda t: op(t, [4] * 8 + [8] * 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),),
in_dims=2,
)
def test_trace(self):
op = torch.trace
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5),))
test(op, (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
def test_transpose(self):
op = torch.transpose
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda x: op(x, 0, 1), (torch.rand(B0, 2, 5),))
test(lambda x: op(x, -1, -2), (torch.rand(B0, 2, 5),))
test(lambda x: op(x, 3, 1), (torch.rand(B0, 2, 5, 4, 6),))
test(lambda x: op(x, 1, 0), (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(lambda x: op(x, 0, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(
vmap(vmap(lambda x: op(x, 0, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 5, B2),),
in_dims=2,
)
# Special case: scalar tensor
for dim1, dim2 in itertools.product([0, -1], [0, -1]):
x = torch.rand(B0)
result = vmap(lambda x: op(x, dim1, dim2))(x)
self.assertTrue(result is x)
def test_t(self):
op = torch.t
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5),))
test(op, (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
def test_T_numpy(self):
def op(t):
return t.T
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 3, 5),))
test(op, (torch.rand(2, B0, 3, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(op), (torch.rand(B1, 2, B0, 3, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 3, B2, 5),), in_dims=2)
def test_to(self):
test = self._vmap_test
B0, B1 = 7, 11
test(lambda t: t.to("cpu"), (torch.rand(B0),))
test(lambda t: t.to(torch.double), (torch.rand(B0),))
test(
lambda t, o: t.to(o), (torch.rand(B0), torch.randn(B0, dtype=torch.float64))
)
test(
lambda t, o: t.to(o),
(torch.rand(B0), torch.randn(B0, dtype=torch.float64)),
in_dims=(0, None),
)
test(vmap(lambda t: t.to(torch.double)), (torch.rand(B0, B1, 3),))
# also test some casting methods
test(lambda t: t.double(), (torch.rand(B0),))
test(lambda t: t.float(), (torch.rand(B0),))
test(lambda t: t.int(), (torch.rand(B0),), check_propagates_grad=False)
test(lambda t: t.long(), (torch.rand(B0),), check_propagates_grad=False)
def test_unfold(self):
op = torch.Tensor.unfold
test = self._vmap_view_test
B0, B1, B2 = 3, 2, 5
test(op, (torch.rand(B0, 7, 11), 0, 2, 1), in_dims=(0, None, None, None))
test(op, (torch.rand(7, B0, 11), 1, 4, 2), in_dims=(1, None, None, None))
test(
vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 7, B0, 11), 1, 5, 1),
in_dims=(2, None, None, None),
)
test(
vmap(
vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)
),
(torch.rand(B1, 7, B0, 11, B2), -1, 2, 4),
in_dims=(2, None, None, None),
)
def test_unbind(self):
test = self._vmap_view_test
op = torch.unbind
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 1024), -1), in_dims=(0, None))
test(op, (torch.rand(B0, 2, 0),))
test(op, (torch.rand(2, B0, 7), 0), in_dims=(1, None))
test(
vmap(op, in_dims=(0, None)),
(torch.rand(B1, 1023, B0, 5), 1),
in_dims=(2, None),
)
test(
vmap(vmap(lambda t: op(t, dim=1), in_dims=2)),
(torch.rand(B1, 2, B0, 32, B2),),
in_dims=2,
)
def test_view(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, None))(torch.rand(2, B0, 5), [10])
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), [1, 2, 1, 10]), in_dims=(0, None))
test(vmap(lambda t: t.view([-1])), (torch.rand(B0, B1, 2, 5, 3),))
test(
vmap(vmap(lambda t: t.reshape([-1])), in_dims=1),
(torch.rand(B2, B0, B1, 3, 2, 5),),
in_dims=1,
)
def test_view_as(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view_as
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, 0))(torch.rand(2, B0, 5), torch.rand(B0, 10))
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)))
test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0))
test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), torch.rand(2, 1, 1, 10)), in_dims=(0, None))
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)))
test(
vmap(vmap(op, in_dims=(0, None)), in_dims=(0, None)),
(torch.rand(B1, B2, B0, 3, 2, 5), torch.rand(B0, 3 * 2 * 5)),
in_dims=(2, 0),
)
def test_no_random_op_support(self):
B0 = 2
captured = torch.rand(3)
random_ops = [
# out-of-place on BatchedTensor
(torch.bernoulli, (torch.rand(B0, 1),)),
(lambda t: torch.bernoulli(t, p=0.5), (torch.rand(B0, 1),)),
(lambda t: torch.multinomial(t, 2), (torch.rand(B0, 3),)),
(torch.normal, (torch.randn(B0, 1), torch.randn(B0, 1))),
(lambda t: torch.normal(t, 1.0), (torch.randn(B0, 1),)),
(lambda t: torch.normal(0.0, t), (torch.randn(B0, 1),)),
(torch.poisson, (torch.rand(B0, 1),)),
(torch.rand_like, (torch.rand(B0, 1),)),
(torch.randn_like, (torch.rand(B0, 1),)),
(lambda t: torch.randint_like(t, 2), (torch.rand(B0, 1),)),
(lambda t: torch.randint_like(t, 0, 2), (torch.rand(B0, 1),)),
# out-of-place on captured tensor
(lambda t: torch.bernoulli(captured), (torch.rand(B0),)),
(lambda t: torch.bernoulli(captured, p=0.5), (torch.rand(B0),)),
(lambda t: torch.multinomial(captured, 2), (torch.rand(B0),)),
(lambda t: torch.normal(captured, captured), (torch.randn(B0),)),
(lambda t: torch.normal(captured, 1.0), (torch.randn(B0),)),
(lambda t: torch.normal(0.0, captured), (torch.randn(B0),)),
(lambda t: torch.poisson(captured), (torch.rand(B0),)),
(lambda t: torch.rand_like(captured), (torch.rand(B0),)),
(lambda t: torch.randn_like(captured), (torch.rand(B0),)),
(lambda t: torch.randint_like(captured, 2), (torch.rand(B0),)),
(lambda t: torch.randint_like(captured, 0, 2), (torch.rand(B0),)),
# in-place on BatchedTensor
(lambda t: t.bernoulli_(), (torch.randn(B0, 1),)),
(lambda t: t.cauchy_(), (torch.randn(B0, 1),)),
(lambda t: t.exponential_(), (torch.randn(B0, 1),)),
(lambda t: t.geometric_(0.5), (torch.randn(B0, 1),)),
(lambda t: t.log_normal_(), (torch.randn(B0, 1),)),
(lambda t: t.normal_(), (torch.randn(B0, 1),)),
(lambda t: t.random_(), (torch.randn(B0, 1),)),
(lambda t: t.random_(0, 2), (torch.randn(B0, 1),)),
(lambda t: t.random_(2), (torch.randn(B0, 1),)),
(lambda t: t.uniform_(), (torch.randn(B0, 1),)),
# in-place on captured tensor
(lambda t: captured.bernoulli_(), (torch.randn(B0),)),
(lambda t: captured.cauchy_(), (torch.randn(B0),)),
(lambda t: captured.exponential_(), (torch.randn(B0),)),
(lambda t: captured.geometric_(0.5), (torch.randn(B0),)),
(lambda t: captured.log_normal_(), (torch.randn(B0),)),
(lambda t: captured.normal_(), (torch.randn(B0),)),
(lambda t: captured.random_(), (torch.randn(B0),)),
(lambda t: captured.random_(0, 2), (torch.randn(B0),)),
(lambda t: captured.random_(2), (torch.randn(B0),)),
(lambda t: captured.uniform_(), (torch.randn(B0),)),
# factory functions
(lambda t: torch.rand(1), (torch.randn(B0),)),
(lambda t: torch.randn(1), (torch.randn(B0),)),
(lambda t: torch.randint(5, [1]), (torch.randn(B0),)),
(lambda t: torch.randperm(5), (torch.randn(B0),)),
]
for op, args in random_ops:
with self.assertRaisesRegex(
RuntimeError, "vmap: We do not yet support calling random operations"
):
vmap(op)(*args)
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests
import types
FALLBACK_REGEX = r'There is a performance drop'
|
import functools
import itertools
import types
import warnings
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
FALLBACK_REGEX = r"There is a performance drop"
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_legacy_vmap.py
|
_get_rand_no_zeros
|
def _get_rand_no_zeros(*args, **kwargs):
requires_grad = kwargs.get('requires_grad', False)
kwargs_without_requires_grad = kwargs.copy()
kwargs_without_requires_grad['requires_grad'] = False
result = torch.rand(*args, **kwargs_without_requires_grad)
return result.clamp_min_(0.1).requires_grad_(requires_grad)
class TestVmapBatchedGradient(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
# Tests batched gradient computation of outputs = op(*args, **kwargs)
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
def _batched_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
batched_vectors = tuple(construct_v(out, batch_size) for out in outputs)
def vector_jacobian_product(*vectors):
return torch.autograd.grad(outputs, differentiable(args), vectors,
retain_graph=True)
self._vmap_test(vector_jacobian_product, batched_vectors,
check_propagates_grad=False)
    # Tests batched second grad computation of outputs = op(*args, **kwargs)
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
#
# NB: we only test computing batched gradients in the second gradient
# computation. One specific use case that does this is computing the hessian
# matrix of a scalar-valued function; this is useful in Bayesian Logistic
# Regression.
# It might be useful to have a test that computes batched first gradients and
# then uses those to compute batched second gradients in the future.
def _batched_grad_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
ones = tuple(torch.ones_like(out) for out in outputs)
# Same thing as summing together all of the outputs and calling .backward()
first_grads = torch.autograd.grad(outputs, differentiable(args), ones,
create_graph=True)
first_grads = differentiable(first_grads)
self.assertNotEqual(
len(first_grads), 0, "None of the first grads depend on the input!")
batched_vectors = tuple(construct_v(grad, batch_size) for grad in first_grads)
def vector_hessian_product(*vectors):
outputs = torch.autograd.grad(first_grads, differentiable(args), vectors,
retain_graph=True, allow_unused=True)
outputs = tuple(out for out in outputs if out is not None)
assert len(outputs) > 0
return outputs
self._vmap_test(vector_hessian_product, batched_vectors,
check_propagates_grad=False)
def _test_arithmetic(self, op, device, test_grad_grad=True):
x = torch.randn(2, 3, requires_grad=True, device=device)
y = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
scalar = 3.14
self._batched_grad_test(op, (x, y))
self._batched_grad_test(op, (scalar, y))
self._batched_grad_test(op, (x, scalar))
if test_grad_grad:
self._batched_grad_grad_test(op, (x, y))
def test_add(self, device):
self._test_arithmetic(torch.add, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x + y, device, test_grad_grad=False)
def test_sub(self, device):
self._test_arithmetic(torch.sub, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x - y, device, test_grad_grad=False)
def test_mul(self, device):
self._test_arithmetic(torch.mul, device)
self._test_arithmetic(lambda x, y: x * y, device)
def test_div(self, device):
self._test_arithmetic(torch.div, device)
self._test_arithmetic(lambda x, y: x / y, device)
@allowVmapFallbackUsage
def test_binary_cross_entropy(self, device):
x = torch.sigmoid(torch.randn(3, 2, device=device, requires_grad=True))
target = torch.rand(3, 2, device=device)
op = functools.partial(F.binary_cross_entropy, target=target)
self._batched_grad_test(op, (x,), {})
self._batched_grad_grad_test(op, (x,), {})
def test_expand(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
def op(x):
return x.expand(5, 5, 2, 3)
self._batched_grad_test(op, (x,))
@allowVmapFallbackUsage
def test_index(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
index = torch.tensor([[0, 0], [1, 1]], device=device)
def op(x):
y = x * x
return y[index]
self._batched_grad_test(op, (x,))
self._batched_grad_grad_test(op, (x,))
def test_lgamma(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(Tensor.lgamma, (x,))
self._batched_grad_grad_test(Tensor.lgamma, (x,))
def test_log(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
self._batched_grad_test(torch.log, (x,))
self._batched_grad_grad_test(torch.log, (x,))
def test_logsumexp(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
def op(x):
return torch.logsumexp(x, -1)
self._batched_grad_test(op, (x,))
self._batched_grad_grad_test(op, (x,))
def test_log1p(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
self._batched_grad_test(torch.log1p, (x,))
self._batched_grad_grad_test(torch.log1p, (x,))
@allowVmapFallbackUsage
def test_max(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.max, (x,))
@allowVmapFallbackUsage
def test_median(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.median, (x,))
@allowVmapFallbackUsage
def test_min(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.min, (x,))
def test_permute(self, device):
x = torch.randn(2, 3, 5, requires_grad=True, device=device)
def op(x):
return x.permute(2, 0, 1)
self._batched_grad_test(op, (x,))
def test_reshape(self, device):
x = torch.randn(2, 3, 5, requires_grad=True, device=device)
def op(x):
return x.reshape([2 * 3, 5])
self._batched_grad_test(op, (x,))
def test_sigmoid(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(Tensor.sigmoid, (x,))
self._batched_grad_grad_test(Tensor.sigmoid, (x,))
def test_stack(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
y = torch.randn(2, 3, device=device, requires_grad=True)
def op(x, y):
return torch.stack([x, y])
self._batched_grad_test(op, (x, y))
def test_select(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x[1], (x,))
self._batched_grad_test(lambda x: x.select(1, 2), (x,))
self._batched_grad_test(lambda x: x.select(-1, 0), (x,))
def test_slice(self, device):
x = torch.randn(2, 3, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x[0:1], (x,))
self._batched_grad_test(lambda x: x[:, 1:3], (x,))
self._batched_grad_test(lambda x: x[..., 1:3], (x,))
def test_trace(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(Tensor.trace, (x,))
def test_threshold(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(lambda x: F.threshold(x, 0.5, 0.0), (x,))
@allowVmapFallbackUsage
def test_inplace_on_view(self, device):
leaf = torch.randn(4, 5, requires_grad=True)
def func(leaf):
# Make sure the function is non-trivially twice differentiable
base = leaf * leaf
view = base[0]
view.cos_()
return view
self._batched_grad_test(func, (leaf,), {})
self._batched_grad_grad_test(func, (leaf,), {})
@allowVmapFallbackUsage
def test_inplace_manyview(self, device):
leaf = torch.randn(4, 4, 5, requires_grad=True)
def func(leaf):
# Make sure the function is non-trivially twice differentiable
base = leaf * leaf
view = base.transpose(0, 2)
view = view[1]
view = view.diagonal()
view = view[::2]
view.cos_()
return view
self._batched_grad_test(func, (leaf,), {})
self._batched_grad_grad_test(func, (leaf,), {})
def test_diagonal(self, device):
x = torch.randn(4, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x.diagonal(1, 0, 1), (x,))
x = torch.randn(3, 4, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x.diagonal(0, -1, -2), (x,))
@allowVmapFallbackUsage
def test_unrelated_output(self, device):
B0 = 3
x = torch.randn([], requires_grad=True)
y = torch.randn([], requires_grad=True)
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
res, = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
result = vmap(vjp)(gy)
self.assertEqual(result, torch.zeros(B0, *x.shape, device=device))
@allowVmapFallbackUsage
def test_unrelated_output_multiple_grad(self, device):
B0 = 3
x = torch.randn([], requires_grad=True)
y = torch.randn([], requires_grad=True)
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
res, = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
_ = vjp(gy[0])
result = vmap(vjp)(gy)
self.assertEqual(result, torch.zeros(B0, *x.shape, device=device))
instantiate_device_type_tests(
TestVmapBatchedGradient,
globals(),
None,
)
if __name__ == '__main__':
run_tests()
|
def _get_rand_no_zeros(*args, **kwargs):
requires_grad = kwargs.get("requires_grad", False)
kwargs_without_requires_grad = kwargs.copy()
kwargs_without_requires_grad["requires_grad"] = False
result = torch.rand(*args, **kwargs_without_requires_grad)
return result.clamp_min_(0.1).requires_grad_(requires_grad)
class TestVmapBatchedGradientLegacy(Namespace.TestVmapBaseLegacy):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
# Tests batched gradient computation of outputs = op(*args, **kwargs)
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
def _batched_grad_test(
self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3
):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
batched_vectors = tuple(construct_v(out, batch_size) for out in outputs)
def vector_jacobian_product(*vectors):
return torch.autograd.grad(
outputs, differentiable(args), vectors, retain_graph=True
)
self._vmap_test(
vector_jacobian_product, batched_vectors, check_propagates_grad=False
)
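    # Illustrative sketch (editor's note, not part of the original file): the pattern exercised
    # by _batched_grad_test above is vmap over a vector-Jacobian product, roughly
    #     out = op(x)
    #     vs = torch.eye(out.numel()).view(out.numel(), *out.shape)
    #     rows = vmap(lambda v: torch.autograd.grad(out, x, v, retain_graph=True)[0])(vs)
    # which computes every row of the Jacobian in one batched call instead of a Python loop.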
    # Tests batched second grad computation of outputs = op(*args, **kwargs)
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
#
# NB: we only test computing batched gradients in the second gradient
# computation. One specific use case that does this is computing the hessian
# matrix of a scalar-valued function; this is useful in Bayesian Logistic
# Regression.
# It might be useful to have a test that computes batched first gradients and
# then uses those to compute batched second gradients in the future.
def _batched_grad_grad_test(
self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3
):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
ones = tuple(torch.ones_like(out) for out in outputs)
# Same thing as summing together all of the outputs and calling .backward()
first_grads = torch.autograd.grad(
outputs, differentiable(args), ones, create_graph=True
)
first_grads = differentiable(first_grads)
self.assertNotEqual(
len(first_grads), 0, "None of the first grads depend on the input!"
)
batched_vectors = tuple(construct_v(grad, batch_size) for grad in first_grads)
def vector_hessian_product(*vectors):
outputs = torch.autograd.grad(
first_grads,
differentiable(args),
vectors,
retain_graph=True,
allow_unused=True,
)
outputs = tuple(out for out in outputs if out is not None)
assert len(outputs) > 0
return outputs
self._vmap_test(
vector_hessian_product, batched_vectors, check_propagates_grad=False
)
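    # Illustrative sketch (editor's note, not part of the original file): the Hessian use case
    # mentioned in the NB above looks roughly like
    #     (g,) = torch.autograd.grad(f(x), x, create_graph=True)   # f scalar-valued
    #     basis = torch.eye(x.numel()).view(x.numel(), *x.shape)
    #     hess_rows = vmap(lambda v: torch.autograd.grad(g, x, v, retain_graph=True)[0])(basis)
    # i.e. a batched vector-Hessian product over the standard basis vectors.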
def _test_arithmetic(self, op, device, test_grad_grad=True):
x = torch.randn(2, 3, requires_grad=True, device=device)
y = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
scalar = 3.14
self._batched_grad_test(op, (x, y))
self._batched_grad_test(op, (scalar, y))
self._batched_grad_test(op, (x, scalar))
if test_grad_grad:
self._batched_grad_grad_test(op, (x, y))
def test_add(self, device):
self._test_arithmetic(torch.add, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x + y, device, test_grad_grad=False)
def test_sub(self, device):
self._test_arithmetic(torch.sub, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x - y, device, test_grad_grad=False)
def test_mul(self, device):
self._test_arithmetic(torch.mul, device)
self._test_arithmetic(lambda x, y: x * y, device)
def test_div(self, device):
self._test_arithmetic(torch.div, device)
self._test_arithmetic(lambda x, y: x / y, device)
@allowVmapFallbackUsage
def test_binary_cross_entropy(self, device):
x = torch.sigmoid(torch.randn(3, 2, device=device, requires_grad=True))
target = torch.rand(3, 2, device=device)
op = functools.partial(F.binary_cross_entropy, target=target)
self._batched_grad_test(op, (x,), {})
self._batched_grad_grad_test(op, (x,), {})
def test_expand(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
def op(x):
return x.expand(5, 5, 2, 3)
self._batched_grad_test(op, (x,))
@allowVmapFallbackUsage
def test_index(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
index = torch.tensor([[0, 0], [1, 1]], device=device)
def op(x):
y = x * x
return y[index]
self._batched_grad_test(op, (x,))
self._batched_grad_grad_test(op, (x,))
def test_lgamma(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(Tensor.lgamma, (x,))
self._batched_grad_grad_test(Tensor.lgamma, (x,))
def test_log(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
self._batched_grad_test(torch.log, (x,))
self._batched_grad_grad_test(torch.log, (x,))
def test_logsumexp(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
def op(x):
return torch.logsumexp(x, -1)
self._batched_grad_test(op, (x,))
self._batched_grad_grad_test(op, (x,))
def test_log1p(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
self._batched_grad_test(torch.log1p, (x,))
self._batched_grad_grad_test(torch.log1p, (x,))
@allowVmapFallbackUsage
def test_max(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.max, (x,))
@allowVmapFallbackUsage
def test_median(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.median, (x,))
@allowVmapFallbackUsage
def test_min(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.min, (x,))
def test_permute(self, device):
x = torch.randn(2, 3, 5, requires_grad=True, device=device)
def op(x):
return x.permute(2, 0, 1)
self._batched_grad_test(op, (x,))
def test_reshape(self, device):
x = torch.randn(2, 3, 5, requires_grad=True, device=device)
def op(x):
return x.reshape([2 * 3, 5])
self._batched_grad_test(op, (x,))
def test_sigmoid(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(Tensor.sigmoid, (x,))
self._batched_grad_grad_test(Tensor.sigmoid, (x,))
def test_stack(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
y = torch.randn(2, 3, device=device, requires_grad=True)
def op(x, y):
return torch.stack([x, y])
self._batched_grad_test(op, (x, y))
def test_select(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x[1], (x,))
self._batched_grad_test(lambda x: x.select(1, 2), (x,))
self._batched_grad_test(lambda x: x.select(-1, 0), (x,))
def test_slice(self, device):
x = torch.randn(2, 3, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x[0:1], (x,))
self._batched_grad_test(lambda x: x[:, 1:3], (x,))
self._batched_grad_test(lambda x: x[..., 1:3], (x,))
def test_trace(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(Tensor.trace, (x,))
def test_threshold(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(lambda x: F.threshold(x, 0.5, 0.0), (x,))
@allowVmapFallbackUsage
def test_inplace_on_view(self, device):
leaf = torch.randn(4, 5, requires_grad=True)
def func(leaf):
# Make sure the function is non-trivially twice differentiable
base = leaf * leaf
view = base[0]
view.cos_()
return view
self._batched_grad_test(func, (leaf,), {})
self._batched_grad_grad_test(func, (leaf,), {})
@allowVmapFallbackUsage
def test_inplace_manyview(self, device):
leaf = torch.randn(4, 4, 5, requires_grad=True)
def func(leaf):
# Make sure the function is non-trivially twice differentiable
base = leaf * leaf
view = base.transpose(0, 2)
view = view[1]
view = view.diagonal()
view = view[::2]
view.cos_()
return view
self._batched_grad_test(func, (leaf,), {})
self._batched_grad_grad_test(func, (leaf,), {})
def test_diagonal(self, device):
x = torch.randn(4, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x.diagonal(1, 0, 1), (x,))
x = torch.randn(3, 4, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x.diagonal(0, -1, -2), (x,))
@allowVmapFallbackUsage
def test_unrelated_output(self, device):
B0 = 3
x = torch.randn([], requires_grad=True)
y = torch.randn([], requires_grad=True)
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
(res,) = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
result = vmap(vjp)(gy)
self.assertEqual(result, torch.zeros(B0, *x.shape, device=device))
@allowVmapFallbackUsage
def test_unrelated_output_multiple_grad(self, device):
B0 = 3
x = torch.randn([], requires_grad=True)
y = torch.randn([], requires_grad=True)
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
(res,) = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
_ = vjp(gy[0])
result = vmap(vjp)(gy)
self.assertEqual(result, torch.zeros(B0, *x.shape, device=device))
instantiate_device_type_tests(TestVmapBatchedGradientLegacy, globals(), None)
if __name__ == "__main__":
run_tests()
|
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests
import types
FALLBACK_REGEX = r'There is a performance drop'
|
import functools
import itertools
import types
import warnings
import torch
import torch.nn.functional as F
from torch import Tensor
from torch._vmap_internals import vmap
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
FALLBACK_REGEX = r"There is a performance drop"
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_linalg.py
|
setLinalgBackendsToDefaultFinally
|
def setLinalgBackendsToDefaultFinally(fn):
@wraps(fn)
def _fn(*args, **kwargs):
try:
fn(*args, **kwargs)
finally:
# Set linalg backend back to default to make sure potential failures in one test
            # don't affect other linalg tests
torch.backends.cuda.preferred_linalg_library('default')
return _fn
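# Illustrative usage (editor's sketch, not part of the original file):
#
#     @setLinalgBackendsToDefaultFinally
#     def test_preferred_backend(self, device):
#         torch.backends.cuda.preferred_linalg_library('cusolver')
#         ...  # the decorator restores 'default' even if this body raises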
@unittest.skipIf(IS_ARM64, "Issue with numpy version on arm")
class TestLinalg(TestCase):
def setUp(self):
super(self.__class__, self).setUp()
torch.backends.cuda.matmul.allow_tf32 = False
def tearDown(self):
torch.backends.cuda.matmul.allow_tf32 = True
super(self.__class__, self).tearDown()
exact_dtype = True
@dtypes(torch.float, torch.cfloat)
@precisionOverride({torch.float: 1e-06, torch.cfloat: 1e-06})
@tf32_on_and_off(5e-3)
def test_inner(self, device, dtype):
def check(a_sizes_, b_sizes_):
for a_sizes, b_sizes in ((a_sizes_, b_sizes_), (b_sizes_, a_sizes_)):
a = torch.randn(a_sizes, dtype=dtype, device=device)
b = torch.randn(b_sizes, dtype=dtype, device=device)
res = torch.inner(a, b)
ref = np.inner(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(res.cpu(), torch.from_numpy(np.array(ref)))
out = torch.zeros_like(res)
torch.inner(a, b, out=out)
self.assertEqual(res, out)
check([], []) # scalar x scalar
check([], [0]) # scalar x empty
check([], [3]) # scalar x 1D
check([], [2, 3, 4]) # scalar x 3D
check([0], [0]) # empty x empty
check([0], [2, 0]) # empty x 2D
check([2], [2]) # 1D x 1D
check([2], [3, 1, 2]) # 1D x 3D
check([2], [3, 0, 2]) # 1D x 3D empty
check([1, 2], [3, 2]) # 2D x 2D
check([1, 2], [3, 4, 2]) # 2D x 3D
check([2, 1, 3, 2], [1, 3, 2, 2]) # 4D x 4D
# Test error message
with self.assertRaisesRegex(RuntimeError,
r"inner\(\) the last dimension must match on both "
r"input tensors but got shapes \[2, 3\] and \[2, 2\]"):
torch.randn(2, 3, device=device, dtype=dtype).inner(torch.randn(2, 2, device=device, dtype=dtype))
# Tests torch.outer, and its alias, torch.ger, vs. NumPy
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_outer(self, device, dtype):
def run_test_case(a, b):
if dtype == torch.bfloat16:
a_np = a.to(torch.double).cpu().numpy()
b_np = b.to(torch.double).cpu().numpy()
exact_dtype = False
else:
a_np = a.cpu().numpy()
b_np = b.cpu().numpy()
exact_dtype = True
expected = np.outer(a_np, b_np)
self.assertEqual(torch.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.ger(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.ger(a, b), expected, exact_dtype=False)
# test out variant
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.outer(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.ger(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
a = torch.randn(50).to(device=device, dtype=dtype)
b = torch.randn(50).to(device=device, dtype=dtype)
run_test_case(a, b)
# test 0 strided tensor
zero_strided = torch.randn(1).to(device=device, dtype=dtype).expand(50)
run_test_case(zero_strided, b)
run_test_case(a, zero_strided)
def test_matrix_rank_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.matrix_rank(a)
def test_solve_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
b = make_tensor(5, 1, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.solve(b, a)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
b.solve(a)
def test_eig_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.eig(a)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
a.eig()
def test_symeig_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.symeig(a)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
a.symeig()
def test_lstsq_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.lstsq(a, a)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
a.lstsq(a)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq(self, device, dtype):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
if self.device_type == 'cpu':
drivers = ('gels', 'gelsy', 'gelsd', 'gelss', None)
else:
drivers = ('gels', None)
def check_solution_correctness(a, b, sol):
sol2 = a.pinverse() @ b
self.assertEqual(sol, sol2, atol=1e-5, rtol=1e-5)
def check_correctness_ref(a, b, res, ref, driver="default"):
def apply_if_not_empty(t, f):
if t.numel():
return f(t)
else:
return t
def select_if_not_empty(t, i):
selected = apply_if_not_empty(t, lambda x: x.select(0, i))
return selected
m = a.size(-2)
n = a.size(-1)
nrhs = b.size(-1)
batch_size = int(np.prod(a.shape[:-2]))
if batch_size == 0:
batch_size = 1
a_3d = a.view(batch_size, m, n)
b_3d = b.view(batch_size, m, nrhs)
solution_3d = res.solution.view(batch_size, n, nrhs)
residuals_2d = apply_if_not_empty(res.residuals, lambda t: t.view(-1, nrhs))
rank_1d = apply_if_not_empty(res.rank, lambda t: t.view(-1))
singular_values_2d = res.singular_values.view(batch_size, res.singular_values.shape[-1])
if a.numel() > 0:
for i in range(batch_size):
sol, residuals, rank, singular_values = ref(
a_3d.select(0, i).numpy(),
b_3d.select(0, i).numpy()
)
# Singular values are None when lapack_driver='gelsy' in SciPy
if singular_values is None:
singular_values = []
self.assertEqual(sol, solution_3d.select(0, i), atol=1e-5, rtol=1e-5)
self.assertEqual(rank, select_if_not_empty(rank_1d, i), atol=1e-5, rtol=1e-5)
self.assertEqual(singular_values, singular_values_2d.select(0, i), atol=1e-5, rtol=1e-5)
# SciPy and NumPy operate only on non-batched input and
# return an empty array with shape (0,) if rank(a) != n
# in PyTorch the batched inputs are supported and
# matrices in the batched input can have different ranks
# we compute residuals only if all matrices have rank == n
# see https://github.com/pytorch/pytorch/issues/56483
if m > n:
if torch.all(rank_1d == n):
self.assertEqual(
residuals, select_if_not_empty(residuals_2d, i), atol=1e-5, rtol=1e-5, exact_dtype=False
)
else:
self.assertTrue(residuals_2d.numel() == 0)
else:
self.assertEqual(res.solution.shape, (*a.shape[:-2], n, nrhs))
self.assertEqual(res.rank.shape, a.shape[:-2])
# residuals are not always computed (and have non-zero shape)
if m > n and driver != "gelsy":
self.assertEqual(res.residuals.shape, (*a.shape[:-2], 0))
else:
self.assertEqual(res.residuals.shape, (0, ))
# singular_values are not always computed (and have non-zero shape)
if driver == "default" or driver == "gelsd" or driver == "gelss":
self.assertEqual(res.singular_values.shape, (*a.shape[:-2], min(m, n)))
else:
self.assertEqual(res.singular_values.shape, (0, ))
def check_correctness_scipy(a, b, res, driver, cond):
# SciPy provides 3 driver options: gelsd, gelss, gelsy
if TEST_SCIPY and driver in ('gelsd', 'gelss', 'gelsy'):
import scipy.linalg
def scipy_ref(a, b):
return scipy.linalg.lstsq(a, b, lapack_driver=driver, cond=cond)
check_correctness_ref(a, b, res, scipy_ref, driver=driver)
def check_correctness_numpy(a, b, res, driver, rcond):
# NumPy uses only gelsd routine
if driver == 'gelsd':
def numpy_ref(a, b):
return np.linalg.lstsq(a, b, rcond=rcond)
check_correctness_ref(a, b, res, numpy_ref)
version = torch.testing._internal.common_cuda._get_torch_cuda_version()
cusolver_available = (version >= (10, 2))
ms = [2 ** i for i in range(5)]
m_ge_n_sizes = [(m, m // 2) for m in ms] + [(m, m) for m in ms]
# cases m < n are only supported on CPU and for cuSOLVER path on CUDA
m_l_n_sizes = [(m // 2, m) for m in ms]
include_m_l_n_case = (cusolver_available or device == 'cpu')
matrix_sizes = m_ge_n_sizes + (m_l_n_sizes if include_m_l_n_case else [])
batches = [(), (2,), (2, 2), (2, 2, 2)]
        # we generate matrices with singular values sampled from a normal distribution,
        # so `cond=1.0` (the mean) cuts off roughly half of all the singular values;
        # we then compare whether torch.linalg.lstsq agrees with SciPy and NumPy.
        # if rcond is True, a driver-specific value is picked for it below.
        # rcond == -1 (or any other negative value) forces LAPACK to use machine precision tolerance.
rconds = (None, True, -1)
for batch, matrix_size, driver, rcond in itertools.product(batches, matrix_sizes, drivers, rconds):
# keep the rcond value if it is None or -1, set the driver specific value if it is True
if rcond and rcond != -1:
if driver in ('gelss', 'gelsd'):
# SVD based algorithm; set to zero roughly half of all the singular values
rcond = 1.0
else:
# driver == 'gelsy'
# QR based algorithm; setting the value too high might lead to non-unique solutions and flaky tests
# so we skip this case
continue
# specifying rcond value has no effect for gels driver so no need to run the tests again
if driver == 'gels' and rcond is not None:
continue
shape = batch + matrix_size
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
b = torch.rand(*shape, dtype=dtype, device=device)
m = a.size(-2)
n = a.size(-1)
res = torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
sol = res.solution
# Only checks gelsd, gelss, gelsy drivers
check_correctness_scipy(a, b, res, driver, rcond)
# Only checks gelsd driver
check_correctness_numpy(a, b, res, driver, rcond)
# gels driver is not checked by comparing to NumPy or SciPy implementation
# because NumPy and SciPy do not implement this driver
if driver == 'gels' and rcond is None:
check_solution_correctness(a, b, sol)
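    # Illustrative note (editor's sketch, not part of the original file): a minimal call is
    #     A = torch.randn(5, 3); b = torch.randn(5, 2)
    #     sol, res, rank, sv = torch.linalg.lstsq(A, b, driver='gelsd')
    # 'gels' assumes full rank and ignores rcond, 'gelsy' is QR-based (reports rank but no
    # singular values), while 'gelsd'/'gelss' are SVD-based and also return singular values.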
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq_batch_broadcasting(self, device, dtype):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
def check_correctness(a, b):
sol = torch.linalg.lstsq(a, b).solution
sol2 = a.pinverse() @ b
self.assertEqual(sol, sol2, rtol=1e-5, atol=1e-5)
ms = [2 ** i for i in range(5)]
batches = [(), (0,), (2,), (2, 2), (2, 2, 2)]
# the case when a single matrix is batch-broadcasted over the rhs
for m, batch in itertools.product(ms, batches):
a = random_well_conditioned_matrix(m, m, dtype=dtype, device=device).view(*([1] * len(batch)), m, m)
b = torch.rand(*(batch + (m, m)), dtype=dtype, device=device)
check_correctness(a, b)
# cases with broadcastable shapes
for m in ms:
a = random_well_conditioned_matrix(1, 3, 1, 3, m, m, dtype=dtype, device=device)
b = torch.rand(3, 1, 3, 1, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
# rhs are vectors, not matrices in this test
b = torch.rand(3, 1, 3, 1, m, dtype=dtype, device=device)
# unsqueeze for b because `check_correctness` checks against
# a.pinverse() @ b, which requires b to be a matrix
check_correctness(a, b.unsqueeze(-1))
a = random_well_conditioned_matrix(3, 1, 3, 1, m, m, dtype=dtype, device=device)
b = torch.rand(1, 3, 1, 3, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
# rhs are vectors, not matrices in this test
b = torch.rand(1, 3, 1, 3, m, dtype=dtype, device=device)
check_correctness(a, b.unsqueeze(-1))
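    # Illustrative note (editor's addition): in the loop above, A with batch shape
    # (1, 3, 1, 3) and b with batch shape (3, 1, 3, 1) broadcast to a common batch
    # shape of (3, 3, 3, 3), so torch.linalg.lstsq solves 81 m-by-m systems at once.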
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq_input_checks(self, device, dtype):
# check empty inputs
# empty batches
a = torch.rand(0, 0, 3, 3, dtype=dtype, device=device)
b = torch.rand(0, 0, 3, 2, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(0, 0, 3, 2, dtype=dtype, device=device)
)
# empty a and b
a = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
b = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
)
# empty a and b
a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
b = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
)
# empty a but not b
a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
b = torch.rand(2, 2, 3, 2, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(2, 2, 0, 2, dtype=dtype, device=device)
)
# empty a and b
if torch.device(device).type == 'cpu':
            # only CPU since CUDA does not support underdetermined systems
a = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
b = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(2, 2, 3, 3, dtype=dtype, device=device)
)
a = torch.rand(2, 3, dtype=dtype, device=device)
b = torch.rand(3, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, 'input must have at least 2 dimensions'):
torch.linalg.lstsq(b, b)
with self.assertRaisesRegex(RuntimeError, 'other must have at least 1 dimension'):
torch.linalg.lstsq(a, torch.tensor(1, dtype=dtype, device=device))
with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-1\)'):
torch.linalg.lstsq(a, b)
with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-2\)'):
torch.linalg.lstsq(a, b.unsqueeze(-1))
def complement_device(device):
if device == 'cpu' and torch.cuda.is_available():
return 'cuda'
else:
return 'cpu'
a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
b = torch.rand(2, 2, 2, dtype=dtype, device=complement_device(device))
if a.device != b.device:
with self.assertRaisesRegex(RuntimeError, 'be on the same device'):
torch.linalg.lstsq(a, b)
b = (torch.rand(2, 2, 2, dtype=dtype, device=device) * 100).long()
with self.assertRaisesRegex(RuntimeError, 'the same dtype'):
torch.linalg.lstsq(a, b)
a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
b = torch.rand(2, 2, 2, dtype=dtype, device=device)
if device != 'cpu':
with self.assertRaisesRegex(RuntimeError, '`driver` other than `gels` is not supported on CUDA'):
torch.linalg.lstsq(a, b, driver='fictitious_driver')
# if on cpu
else:
with self.assertRaisesRegex(RuntimeError, r'parameter `driver` should be one of \(gels, gelsy, gelsd, gelss\)'):
torch.linalg.lstsq(a, b, driver='fictitious_driver')
# cuSOLVER path supports underdetermined systems
version = torch.testing._internal.common_cuda._get_torch_cuda_version()
cusolver_not_available = (version < (10, 1))
if device != 'cpu' and cusolver_not_available:
a = torch.rand(2, 3, dtype=dtype, device=device)
b = torch.rand(2, 1, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, r'only overdetermined systems'):
torch.linalg.lstsq(a, b)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(shape, batch, contiguous):
A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
if A.numel() > 0 and not contiguous:
A = A.mT
self.assertFalse(A.is_contiguous())
expected_L = np.linalg.cholesky(A.cpu().numpy())
actual_L = torch.linalg.cholesky(A)
# For fp32 individual entries in matrices can differ between PyTorch and NumPy
# Let's compare the norms of matrices instead
if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
# axis is specified to calculate matrix norm for batched input
expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
# Compare the norms with standard tolerances
self.assertEqual(actual_norm, expected_norm)
# and individual values with a higher tolerance
self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
else:
self.assertEqual(actual_L, expected_L)
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
larger_input_case = [(100, (5, ), True)]
for shape, batch, contiguous in list(itertools.product(shapes, batches, (True, False))) + larger_input_case:
run_test(shape, batch, contiguous)
# check the out= variant
A = random_hermitian_pd_matrix(3, 3, dtype=dtype, device=device)
out = torch.empty_like(A)
ans = torch.linalg.cholesky(A, out=out)
self.assertEqual(ans, out)
expected = torch.linalg.cholesky(A)
self.assertEqual(expected, out)
# check the upper= variant
expected = torch.linalg.cholesky(A).mH
actual = torch.linalg.cholesky(A, upper=True)
self.assertEqual(expected, actual)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_errors_and_warnings(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
# cholesky requires the input to be a square matrix or batch of square matrices
A = torch.randn(2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
torch.linalg.cholesky(A)
A = torch.randn(2, 2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
torch.linalg.cholesky(A)
with self.assertRaisesRegex(np.linalg.LinAlgError, r'Last 2 dimensions of the array must be square'):
np.linalg.cholesky(A.cpu().numpy())
# cholesky requires the input to be at least 2 dimensional tensor
A = torch.randn(2, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
torch.linalg.cholesky(A)
with self.assertRaisesRegex(np.linalg.LinAlgError,
r'1-dimensional array given\. Array must be at least two-dimensional'):
np.linalg.cholesky(A.cpu().numpy())
# if the input matrix is not positive definite, an error should be raised
A = torch.eye(3, 3, dtype=dtype, device=device)
A[-1, -1] = 0 # Now A is not positive definite
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'minor of order 3 is not positive-definite'):
torch.linalg.cholesky(A)
with self.assertRaisesRegex(np.linalg.LinAlgError, r'Matrix is not positive definite'):
np.linalg.cholesky(A.cpu().numpy())
# if at least one matrix in the batch is singular, an error should be raised
A = torch.eye(3, 3, dtype=dtype, device=device)
A = A.reshape((1, 3, 3))
A = A.repeat(5, 1, 1)
A[4, -1, -1] = 0 # Now A[4] is not positive definite
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 4\): The factorization could not be completed'):
torch.linalg.cholesky(A)
# if out tensor with wrong shape is passed a warning is given
A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
out = torch.empty(2, 3, dtype=dtype, device=device)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.cholesky(A, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out = torch.empty(*A.shape, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got int instead"):
torch.linalg.cholesky(A, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, device=wrong_device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.linalg.cholesky(A, out=out)
# NOTE: old_cholesky* tests were moved here from test_torch.py and test_autograd.py
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_old_cholesky_batched_many_batches(self, device, dtype):
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
def cholesky_test_helper(n, batchsize, device, upper):
A = random_symmetric_pd_matrix(n, batchsize, dtype=dtype, device=device)
chol_fact = torch.cholesky(A, upper=upper)
if upper:
# Correctness check
self.assertEqual(A, chol_fact.mT.matmul(chol_fact))
# Upper triangular check
self.assertEqual(chol_fact, chol_fact.triu())
else:
# Correctness check
self.assertEqual(A, chol_fact.matmul(chol_fact.mT))
# Lower triangular check
self.assertEqual(chol_fact, chol_fact.tril())
for upper, batchsize in itertools.product([True, False], [262144, 524288]):
cholesky_test_helper(2, batchsize, device, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_cholesky_batched(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def cholesky_test_helper(n, batch_dims, upper):
A = random_hermitian_pd_matrix(n, *batch_dims, dtype=dtype, device=device)
cholesky_exp = torch.stack([m.cholesky(upper=upper) for m in A.reshape(-1, n, n)])
cholesky_exp = cholesky_exp.reshape_as(A)
self.assertEqual(cholesky_exp, torch.cholesky(A, upper=upper))
for upper, batchsize in itertools.product([True, False], [(3,), (3, 4), (2, 3, 4)]):
cholesky_test_helper(3, batchsize, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@tf32_on_and_off(0.01)
def test_old_cholesky(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
A = random_hermitian_pd_matrix(10, dtype=dtype, device=device)
# default Case
C = torch.cholesky(A)
B = torch.mm(C, C.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0)
# test Upper Triangular
U = torch.cholesky(A, True)
B = torch.mm(U.t().conj(), U)
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (upper) did not allow rebuilding the original matrix')
# test Lower Triangular
L = torch.cholesky(A, False)
B = torch.mm(L, L.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (lower) did not allow rebuilding the original matrix')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_cholesky_empty(self, device, dtype):
def run_test(upper):
A = torch.empty(0, 0, dtype=dtype, device=device)
chol = torch.cholesky(A, upper)
chol_A = torch.matmul(chol, chol.t().conj())
self.assertEqual(A, chol_A)
for upper in [True, False]:
run_test(upper)
# Test for issue
# https://github.com/pytorch/pytorch/issues/57032
# torch.cholesky with upper=True for batched CUDA inputs was wrong
# it was using the lower triangular part instead of the upper one
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_old_cholesky_batched_upper(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batchsize = 2
A = random_hermitian_pd_matrix(3, batchsize, dtype=dtype, device=device)
A_triu = A.triu() # fill the lower triangular part with zero
U = torch.cholesky(A_triu, upper=True)
reconstruct_A = U.mH @ U
self.assertEqual(A, reconstruct_A)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_ex(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(n, batch):
A = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
expected_L = np.linalg.cholesky(A.cpu().numpy())
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
actual_L, actual_info = torch.linalg.cholesky_ex(A)
# For fp32 individual entries in matrices can differ between PyTorch and NumPy
# Let's compare the norms of matrices instead
if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
# axis is specified to calculate matrix norm for batched input
expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
# Compare the norms with standard tolerances
self.assertEqual(actual_norm, expected_norm)
# and individual values with a higher tolerance
self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
else:
self.assertEqual(actual_L, expected_L)
self.assertEqual(actual_info, expected_info)
ns = (0, 3, 5)
batches = ((), (2, ), (2, 1))
for n, batch in itertools.product(ns, batches):
run_test(n, batch)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_ex_non_pd(self, device, dtype):
# if the input matrix is not positive definite, info with positive integer is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A[-1, -1] = 0 # Now A is singular
_, info = torch.linalg.cholesky_ex(A)
self.assertEqual(info, 3)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'minor of order 3 is not positive-definite'):
torch.linalg.cholesky_ex(A, check_errors=True)
# if at least one matrix in the batch is not positive definite,
# batched info with positive integer for the corresponding matrix is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A = A.reshape((1, 3, 3))
A = A.repeat(5, 1, 1)
A[3, -2, -2] = 0 # Now A[3] is singular
_, info = torch.linalg.cholesky_ex(A)
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
expected_info[3] = 2
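        # Following LAPACK potrf semantics, info reports the order of the first leading minor that is
        # not positive definite; zeroing A[3][1][1] makes the 2x2 leading minor singular, hence info == 2.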
self.assertEqual(info, expected_info)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 3\): The factorization could not be completed'):
torch.linalg.cholesky_ex(A, check_errors=True)
def _test_addr_vs_numpy(self, device, dtype, beta=1, alpha=1):
def check(m, a, b, beta, alpha):
if dtype == torch.bfloat16:
a_np = a.to(torch.double).cpu().numpy()
b_np = b.to(torch.double).cpu().numpy()
m_np = m.to(torch.double).cpu().numpy()
exact_dtype = False
else:
a_np = a.cpu().numpy()
b_np = b.cpu().numpy()
m_np = m.cpu().numpy()
exact_dtype = True
if beta == 0:
expected = alpha * np.outer(a_np, b_np)
else:
expected = beta * m_np + alpha * np.outer(a_np, b_np)
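            # torch.addr computes beta * M + alpha * outer(a, b); when beta == 0 the M term is skipped
            # entirely (per the documented addr/addmm semantics), which the extremal-value check at the
            # end of this helper relies on.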
res = torch.addr(m, a, b, beta=beta, alpha=alpha)
self.assertEqual(res, expected, exact_dtype=exact_dtype)
# Test out variant
out = torch.empty_like(res)
torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)
self.assertEqual(out, expected, exact_dtype=exact_dtype)
m = make_tensor((50, 50), device=device, dtype=dtype, low=-2, high=2)
a = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
b = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
check(m, a, b, beta, alpha)
# test transpose
m_transpose = torch.transpose(m, 0, 1)
check(m_transpose, a, b, beta, alpha)
# test 0 strided tensor
zero_strided = make_tensor((1,), device=device, dtype=dtype, low=-2, high=2).expand(50)
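        # expand(50) yields a view with stride 0 along the expanded dimension,
        # so this exercises addr with a zero-strided (non-contiguous) vector input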
check(m, zero_strided, b, beta, alpha)
# test scalar
m_scalar = torch.tensor(1, device=device, dtype=dtype)
check(m_scalar, a, b, beta, alpha)
# test nans and infs are not propagated to the output when beta == 0
float_and_complex_dtypes = floating_and_complex_types_and(torch.half, torch.bfloat16)
if beta == 0 and dtype in float_and_complex_dtypes:
m[0][10] = m[10][10] = m[20][20] = float('inf')
m[1][10] = m[11][10] = m[21][20] = float('nan')
check(m, a, b, 0, alpha)
@dtypes(torch.bool)
def test_addr_bool(self, device, dtype):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=True)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=True)
@dtypes(*integral_types())
def test_addr_integral(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'argument beta must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2., alpha=1)
with self.assertRaisesRegex(RuntimeError,
'argument alpha must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=1.)
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0, alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_addr_float_and_complex(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0., alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=0.5, alpha=2)
if dtype in complex_types():
self._test_addr_vs_numpy(device, dtype, beta=(0 + 0.1j), alpha=(0.2 - 0.2j))
@dtypes(*itertools.product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
def test_outer_type_promotion(self, device, dtypes):
a = torch.randn(5).to(device=device, dtype=dtypes[0])
b = torch.randn(5).to(device=device, dtype=dtypes[1])
for op in (torch.outer, torch.Tensor.outer, torch.ger, torch.Tensor.ger):
result = op(a, b)
self.assertEqual(result.dtype, torch.result_type(a, b))
# don't use @dtypes decorator to avoid generating ~1700 tests per device
def test_addr_type_promotion(self, device):
for dtypes0, dtypes1, dtypes2 in product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), repeat=3):
a = make_tensor((5,), device=device, dtype=dtypes0, low=-2, high=2)
b = make_tensor((5,), device=device, dtype=dtypes1, low=-2, high=2)
m = make_tensor((5, 5), device=device, dtype=dtypes2, low=-2, high=2)
desired_dtype = torch.promote_types(torch.promote_types(dtypes0, dtypes1),
dtypes2)
for op in (torch.addr, torch.Tensor.addr):
result = op(m, a, b)
self.assertEqual(result.dtype, desired_dtype)
# Tests migrated from test_torch.py
# 1) test the shape of the result tensor when there is empty input tensor
# 2) test the Runtime Exception when there is scalar input tensor
def test_outer_ger_addr_legacy_tests(self, device):
for size in ((0, 0), (0, 5), (5, 0)):
a = torch.rand(size[0], device=device)
b = torch.rand(size[1], device=device)
self.assertEqual(torch.outer(a, b).shape, size)
self.assertEqual(torch.ger(a, b).shape, size)
m = torch.empty(size, device=device)
self.assertEqual(torch.addr(m, a, b).shape, size)
m = torch.randn(5, 6, device=device)
a = torch.randn(5, device=device)
b = torch.tensor(6, device=device)
self.assertRaises(RuntimeError, lambda: torch.outer(a, b))
self.assertRaises(RuntimeError, lambda: torch.outer(b, a))
self.assertRaises(RuntimeError, lambda: torch.ger(a, b))
self.assertRaises(RuntimeError, lambda: torch.ger(b, a))
self.assertRaises(RuntimeError, lambda: torch.addr(m, a, b))
self.assertRaises(RuntimeError, lambda: torch.addr(m, b, a))
# Tests torch.det and its alias, torch.linalg.det, vs. NumPy
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cdouble)
def test_det(self, device, dtype):
tensors = (
torch.randn((2, 2), device=device, dtype=dtype),
torch.randn((129, 129), device=device, dtype=dtype),
torch.randn((3, 52, 52), device=device, dtype=dtype),
torch.randn((4, 2, 26, 26), device=device, dtype=dtype))
ops = (torch.det, torch.Tensor.det,
torch.linalg.det)
for t in tensors:
expected = np.linalg.det(t.cpu().numpy())
for op in ops:
actual = op(t)
self.assertEqual(actual, expected)
self.compare_with_numpy(op, np.linalg.det, t)
# NOTE: det requires a 2D+ tensor
t = torch.randn(1, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
op(t)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(shape, batch, uplo):
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
# sign of eigenvectors is not unique and therefore absolute values are compared
self.assertEqual(abs(actual_v), abs(expected_v))
# additionally we can multiply the eigenvector with a phase factor e^{i\phi} and then compare the values
# let's choose the convention that the first element of the eigenvectors from torch and numpy be the same
# for real inputs, this phase factor is plus or minus one
if matrix.numel() > 0:
phase = torch.from_numpy(expected_v[..., 0, :]).to(device=device).div(actual_v[..., 0, :])
actual_v_rotated = actual_v * phase.unsqueeze(-2).expand_as(actual_v)
self.assertEqual(actual_v_rotated, expected_v)
# check the out= variant
out_w = torch.empty_like(actual_w)
out_v = torch.empty_like(actual_v)
ans_w, ans_v = torch.linalg.eigh(matrix, UPLO=uplo, out=(out_w, out_v))
self.assertEqual(ans_w, out_w)
self.assertEqual(ans_v, out_v)
self.assertEqual(ans_w, actual_w)
self.assertEqual(abs(ans_v), abs(actual_v))
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh_lower_uplo(self, device, dtype):
def run_test(shape, batch, uplo):
# check lower case uplo
# use non-symmetric input to check whether uplo argument is working as intended
matrix = torch.randn(shape, shape, *batch, dtype=dtype, device=device)
expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
self.assertEqual(abs(actual_v), abs(expected_v))
uplos = ["u", "l"]
for uplo in uplos:
run_test(3, (2, 2), uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eigh_errors_and_warnings(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
# eigh requires a square matrix
t = torch.randn(2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.linalg.eigh(t)
# eigh requires 'uplo' parameter to be 'U' or 'L'
t = torch.randn(3, 3, device=device, dtype=dtype)
for uplo in ["a", "wrong"]:
with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
torch.linalg.eigh(t, UPLO=uplo)
with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
np.linalg.eigh(t.cpu().numpy(), UPLO=uplo)
# if non-empty out tensor with wrong shape is passed a warning is given
a = random_hermitian_matrix(3, dtype=dtype, device=device)
real_dtype = a.real.dtype if dtype.is_complex else dtype
out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
out_v = torch.empty(7, 7, dtype=dtype, device=device)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.eigh(a, out=(out_w, out_v))
# Check warning occurs
self.assertEqual(len(w), 2)
self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out_w = torch.empty(0, dtype=real_dtype, device=device)
out_v = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got int instead"):
torch.linalg.eigh(a, out=(out_w, out_v))
out_w = torch.empty(0, dtype=torch.int, device=device)
out_v = torch.empty(0, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "but got int instead"):
torch.linalg.eigh(a, out=(out_w, out_v))
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out_w = torch.empty(0, device=wrong_device, dtype=dtype)
out_v = torch.empty(0, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.eigh(a, out=(out_w, out_v))
out_w = torch.empty(0, device=device, dtype=dtype)
out_v = torch.empty(0, device=wrong_device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.eigh(a, out=(out_w, out_v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigvalsh(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(shape, batch, uplo):
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)
actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
# check the out= variant
out = torch.empty_like(actual_w)
ans = torch.linalg.eigvalsh(matrix, UPLO=uplo, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, actual_w)
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eigvalsh_errors_and_warnings(self, device, dtype):
# eigvalsh requires a square matrix
t = torch.randn(2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.linalg.eigvalsh(t)
# eigvalsh requires 'uplo' parameter to be 'U' or 'L'
t = torch.randn(3, 3, device=device, dtype=dtype)
for uplo in ["a", "wrong"]:
with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
torch.linalg.eigvalsh(t, UPLO=uplo)
with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
np.linalg.eigvalsh(t.cpu().numpy(), UPLO=uplo)
# if non-empty out tensor with wrong shape is passed a warning is given
real_dtype = t.real.dtype if dtype.is_complex else dtype
out = torch.empty_like(t).to(real_dtype)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.eigvalsh(t, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got int instead"):
torch.linalg.eigvalsh(t, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, device=wrong_device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.eigvalsh(t, out=out)
@dtypes(*floating_and_complex_types())
def test_kron(self, device, dtype):
def run_test_case(a_shape, b_shape):
a = torch.rand(a_shape, dtype=dtype, device=device)
b = torch.rand(b_shape, dtype=dtype, device=device)
expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
result = torch.kron(a, b)
self.assertEqual(result, expected)
# check the out= variant
out = torch.empty_like(result)
ans = torch.kron(a, b, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
shapes = [(4,), (2, 2), (1, 2, 3), (1, 2, 3, 3)]
for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):
run_test_case(a_shape, b_shape)
@dtypes(*floating_and_complex_types())
def test_kron_empty(self, device, dtype):
def run_test_case(empty_shape):
a = torch.eye(3, dtype=dtype, device=device)
b = torch.empty(empty_shape, dtype=dtype, device=device)
result = torch.kron(a, b)
expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(result, expected)
# NumPy doesn't work if the first argument is empty
result = torch.kron(b, a)
self.assertEqual(result.shape, expected.shape)
empty_shapes = [(0,), (2, 0), (1, 0, 3)]
for empty_shape in empty_shapes:
run_test_case(empty_shape)
@dtypes(*floating_and_complex_types())
def test_kron_errors_and_warnings(self, device, dtype):
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.eye(3, dtype=dtype, device=device)
b = torch.ones((2, 2), dtype=dtype, device=device)
out = torch.empty_like(a)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.kron(a, b, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should match
out = torch.empty_like(a).to(torch.int)
with self.assertRaisesRegex(RuntimeError, "can't be cast to the desired output type"):
torch.kron(a, b, out=out)
# This test confirms that torch.linalg.norm's dtype argument works
# as expected, according to the function's documentation
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_norm_dtype(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
def run_test_case(input_size, ord, keepdim, to_dtype):
msg = (
f'input_size={input_size}, ord={ord}, keepdim={keepdim}, '
f'dtype={dtype}, to_dtype={to_dtype}')
input = make_arg(input_size)
result = torch.linalg.norm(input, ord, keepdim=keepdim)
self.assertEqual(result.dtype, input.real.dtype, msg=msg)
            result_out = torch.empty(0, dtype=result.dtype, device=device)
torch.linalg.norm(input, ord, keepdim=keepdim, out=result_out)
self.assertEqual(result, result_out, msg=msg)
result = torch.linalg.norm(input.to(to_dtype), ord, keepdim=keepdim)
result_with_dtype = torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype)
self.assertEqual(result, result_with_dtype, msg=msg)
result_out_with_dtype = torch.empty_like(result_with_dtype)
torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype, out=result_out_with_dtype)
self.assertEqual(result_with_dtype, result_out_with_dtype, msg=msg)
ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf, None]
# In these orders we are computing the 10-th power and 10-th root of numbers.
# We avoid them for half-precision types as it makes the tests above too badly conditioned
if dtype != torch.float16 and dtype != torch.bfloat16:
ord_vector.extend([0.1, -0.1])
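        # The vector p-norm is (sum_i |x_i|**p) ** (1 / p); for p = +/-0.1 each entry is taken to the
        # 0.1 power (a 10-th root) and the sum to the 10-th power, which quickly loses precision in fp16/bf16.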
ord_matrix = ['fro', 'nuc', 1, -1, 2, -2, inf, -inf, None]
S = 10
if dtype == torch.cfloat:
norm_dtypes = (torch.cfloat, torch.cdouble)
elif dtype == torch.cdouble:
norm_dtypes = (torch.cdouble,)
elif dtype in (torch.float16, torch.bfloat16, torch.float):
norm_dtypes = (torch.float, torch.double)
elif dtype == torch.double:
norm_dtypes = (torch.double,)
else:
raise RuntimeError("Unsupported dtype")
for ord, keepdim, norm_dtype in product(ord_vector, (True, False), norm_dtypes):
run_test_case((S,) , ord, keepdim, norm_dtype)
for ord, keepdim, norm_dtype in product(ord_matrix, (True, False), norm_dtypes):
if ord in [2, -2, 'nuc']:
# We need torch.svdvals
if dtype == torch.float16 or dtype == torch.bfloat16:
continue
# We need LAPACK or equivalent
if ((torch.device(device).type == 'cuda' and not torch.cuda.has_magma and not has_cusolver()) or
(torch.device(device).type == 'cpu' and not torch._C.has_lapack)):
continue
run_test_case((S, S) , ord, keepdim, norm_dtype)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_vector_norm(self, device, dtype):
# This test compares torch.linalg.vector_norm's output with
# torch.linalg.norm given a flattened tensor
ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
input_sizes = [
(10, ),
(4, 5),
(3, 4, 5),
(0, ),
(0, 10),
(0, 0),
(10, 0, 10),
]
def vector_norm_reference(input, ord, dim=None, keepdim=False, dtype=None):
if dim is None:
input_maybe_flat = input.flatten(0, -1)
else:
input_maybe_flat = input
result = torch.linalg.norm(input_maybe_flat, ord, dim=dim, keepdim=keepdim, dtype=dtype)
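            # When dim is None the reference reduces the flattened tensor to a scalar, so keepdim is
            # emulated below by restoring one singleton dimension per original dimension.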
if keepdim and dim is None:
result = result.reshape([1] * input.dim())
return result
def run_test_case(input, ord, dim, keepdim, norm_dtype):
if (input.numel() == 0 and
(ord < 0. or ord == inf) and
(dim is None or input.shape[dim] == 0)):
# The operation does not have an identity.
error_msg = "linalg.vector_norm cannot compute"
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim)
else:
msg = (f'input.size()={input.size()}, ord={ord}, dim={dim}, '
f'keepdim={keepdim}, dtype={dtype}, norm_dtype={norm_dtype}')
result_dtype_reference = vector_norm_reference(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
result_dtype = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
if dtype.is_complex:
result_dtype_reference = result_dtype_reference.real
self.assertEqual(result_dtype, result_dtype_reference, msg=msg)
if norm_dtype is not None:
ref = torch.linalg.vector_norm(input.to(norm_dtype), ord, dim=dim, keepdim=keepdim)
actual = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
self.assertEqual(ref, actual, msg=msg)
if dtype == torch.cfloat:
norm_dtypes = (None, torch.cfloat, torch.cdouble)
elif dtype == torch.cdouble:
norm_dtypes = (None, torch.cdouble)
elif dtype in (torch.float16, torch.bfloat16, torch.float):
norm_dtypes = (None, torch.float, torch.double)
elif dtype == torch.double:
norm_dtypes = (None, torch.double)
else:
raise RuntimeError("Unsupported dtype")
for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
input = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
for dim in [None, random.randint(0, len(input_size) - 1)]:
run_test_case(
input,
ord,
dim,
keepdim,
norm_dtype)
def test_vector_norm_dim_tuple_arg(self, device):
test_cases = [
# input size, dim, error, error message
((4, ), (0, ), None, None),
((4, ), (1, ), IndexError, r'Dimension out of range'),
((4, ), (-2, ), IndexError, r'Dimension out of range'),
((4, 3), (0, -1), None, None),
((4, 3), (0, 0), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
((4, 3), (0, -2), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
((4, 3), (0, 1.0), TypeError, r"argument 'dim' must be tuple of ints"),
((4, 3), (None, ), TypeError, r"argument 'dim' must be tuple of ints"),
]
for input_size, dim_tuple, error, error_msg in test_cases:
input = torch.randn(input_size, device=device)
# vector_norm should accept a tuple or a list for dim arg
for dim in [dim_tuple, list(dim_tuple)]:
if error is None:
torch.linalg.vector_norm(input, dim=dim)
else:
with self.assertRaises(error):
torch.linalg.vector_norm(input, dim=dim)
# This test compares torch.linalg.norm and numpy.linalg.norm to ensure that
# their vector norm results match
@dtypes(torch.float, torch.double)
def test_norm_vector(self, device, dtype):
        def run_test_case(input, ord, dim, keepdim):
result = torch.linalg.norm(input, ord, dim, keepdim)
input_numpy = input.cpu().numpy()
result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
self.assertEqual(result, result_numpy, msg=msg)
result_out = torch.empty_like(result)
torch.linalg.norm(input, ord, dim, keepdim, out=result_out)
self.assertEqual(result, result_out, msg=msg)
ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf]
S = 10
test_cases = [
# input size, p settings, dim
((S, ), ord_vector, None),
((S, ), ord_vector, 0),
((S, S, S), ord_vector, 0),
((S, S, S), ord_vector, 1),
((S, S, S), ord_vector, 2),
((S, S, S), ord_vector, -1),
((S, S, S), ord_vector, -2),
]
L = 1_000_000
if dtype == torch.double:
test_cases.append(((L, ), ord_vector, None))
for keepdim in [True, False]:
for input_size, ord_settings, dim in test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_settings:
run_test_case(input, ord, dim, keepdim)
# This test compares torch.linalg.norm, torch.linalg.matrix_norm and numpy.linalg.norm to
# ensure that their matrix norm results match.
@skipMeta # https://github.com/pytorch/pytorch/issues/54082
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-4})
def test_norm_matrix(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
def run_test_case(input, ord, dim, keepdim):
msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
result = torch.linalg.norm(input, ord, dim, keepdim)
input_numpy = input.cpu().numpy()
result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
self.assertEqual(result, result_numpy, msg=msg)
if ord is not None and dim is not None:
result = torch.linalg.matrix_norm(input, ord, dim, keepdim)
self.assertEqual(result, result_numpy, msg=msg)
ord_matrix = [1, -1, 2, -2, inf, -inf, 'nuc', 'fro']
S = 10
test_cases = [
# input size, dim
((S, S), None),
((S, S), (0, 1)),
((S, S), (1, 0)),
((S, S, S, S), (2, 0)),
((S, S, S, S), (-1, -2)),
((S, S, S, S), (-1, -3)),
((S, S, S, S), (-3, 2)),
]
for (shape, dim), keepdim, ord in product(test_cases, [True, False], ord_matrix):
if ord in [2, -2, 'nuc']:
# We need torch.svdvals
if dtype == torch.float16 or dtype == torch.bfloat16:
continue
# We need LAPACK or equivalent
if ((torch.device(device).type == 'cuda' and not torch.cuda.has_magma and not has_cusolver()) or
(torch.device(device).type == 'cpu' and not torch._C.has_lapack)):
continue
run_test_case(make_arg(shape), ord, dim, keepdim)
@onlyCUDA
@dtypes(torch.bfloat16, torch.float16)
def test_norm_fused_type_promotion(self, device, dtype):
x = torch.randn(10, device=device, dtype=dtype)
def profile_and_check(fn, x, kwargs):
with torch.profiler.profile(activities=(torch.profiler.ProfilerActivity.CPU,)) as p:
fn(x, **kwargs, dtype=torch.float)
# smoke check that profiler returned some events
self.assertTrue("aten::linalg_vector_norm" in (e.name for e in p.events()))
# test that there was no explicit copy
self.assertFalse("aten::to" in (e.name for e in p.events()))
for f, kwargs, in zip((torch.linalg.vector_norm, torch.norm), ({}, {"p" : 2})):
profile_and_check(f, x, kwargs)
@skipMeta # https://github.com/pytorch/pytorch/issues/53739
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3})
def test_cond(self, device, dtype):
def run_test_case(input, p):
result = torch.linalg.cond(input, p)
result_numpy = np.linalg.cond(input.cpu().numpy(), p)
self.assertEqual(result, result_numpy, rtol=1e-2, atol=self.precision, exact_dtype=False)
self.assertEqual(result.shape, result_numpy.shape)
# test out= variant
out = torch.empty_like(result)
ans = torch.linalg.cond(input, p, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]
input_sizes = [(32, 32), (2, 3, 3, 3)]
for input_size in input_sizes:
input = torch.randn(*input_size, dtype=dtype, device=device)
for p in norm_types:
run_test_case(input, p)
# test empty batch sizes
input_sizes = [(0, 3, 3), (0, 2, 5, 5)]
for input_size in input_sizes:
input = torch.randn(*input_size, dtype=dtype, device=device)
for p in norm_types:
run_test_case(input, p)
# test non-square input
input_sizes = [(16, 32), (32, 16), (2, 3, 5, 3), (2, 3, 3, 5)]
for input_size in input_sizes:
input = torch.randn(*input_size, dtype=dtype, device=device)
for p in [2, -2, None]:
run_test_case(input, p)
# test for singular input
a = torch.eye(3, dtype=dtype, device=device)
a[-1, -1] = 0 # make 'a' singular
for p in norm_types:
try:
run_test_case(a, p)
except np.linalg.LinAlgError:
# Numpy may fail to converge for some BLAS backends (although this is very rare)
# See the discussion in https://github.com/pytorch/pytorch/issues/67675
pass
# test for 0x0 matrices. NumPy doesn't work for such input, we return 0
input_sizes = [(0, 0), (2, 5, 0, 0)]
for input_size in input_sizes:
input = torch.randn(*input_size, dtype=dtype, device=device)
for p in ['fro', 2]:
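                # The condition number is real-valued, so complex inputs are expected to
                # produce an output with the corresponding real dtype.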
                expected_dtype = input.real.dtype if dtype.is_complex else dtype
expected = torch.zeros(input_size[:-2], dtype=expected_dtype, device=device)
actual = torch.linalg.cond(input, p)
self.assertEqual(actual, expected)
@skipMeta # https://github.com/pytorch/pytorch/issues/53739
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3})
def test_cond_errors_and_warnings(self, device, dtype):
norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]
# cond expects the input to be at least 2-dimensional
a = torch.ones(3, dtype=dtype, device=device)
for p in norm_types:
with self.assertRaisesRegex(RuntimeError, r'at least 2 dimensions'):
torch.linalg.cond(a, p)
# for some norm types cond expects the input to be square
a = torch.ones(3, 2, dtype=dtype, device=device)
norm_types = [1, -1, inf, -inf, 'fro', 'nuc']
for p in norm_types:
with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
torch.linalg.cond(a, p)
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.ones((2, 2), dtype=dtype, device=device)
for p in ['fro', 2]:
real_dtype = a.real.dtype if dtype.is_complex else dtype
out = torch.empty(a.shape, dtype=real_dtype, device=device)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.cond(a, p, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out = torch.empty(0, dtype=torch.int, device=device)
for p in ['fro', 2]:
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.linalg.cond(a, p, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
for p in ['fro', 2]:
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.cond(a, p, out=out)
# for batched input if at least one matrix in the batch is not invertible,
# we can't get the result for all other (possibly) invertible matrices in the batch without an explicit for loop.
# this should change when at::inverse works with silent errors
# NumPy works fine in this case because it's possible to silence the error and get the inverse matrix results
# possibly filled with NANs
batch_dim = 3
a = torch.eye(3, 3, dtype=dtype, device=device)
a = a.reshape((1, 3, 3))
a = a.repeat(batch_dim, 1, 1)
a[1, -1, -1] = 0 # now a[1] is singular
for p in [1, -1, inf, -inf, 'fro', 'nuc']:
result = torch.linalg.cond(a, p)
self.assertEqual(result[1], float('inf'))
# check invalid norm type
a = torch.ones(3, 3, dtype=dtype, device=device)
for p in ['wrong_norm', 5]:
with self.assertRaisesRegex(RuntimeError, f"linalg.cond got an invalid norm type: {p}"):
torch.linalg.cond(a, p)
# This test calls torch.linalg.norm and numpy.linalg.norm with illegal arguments
# to ensure that they both throw errors
@dtypes(torch.float, torch.double)
def test_norm_errors(self, device, dtype):
def run_error_test_case(input, ord, dim, keepdim, error_type, error_regex):
test_case_info = (
f'test case input.size()={input.size()}, ord={ord}, dim={dim}, '
f'keepdim={keepdim}, dtype={dtype}')
with self.assertRaisesRegex(error_type, error_regex, msg=test_case_info):
torch.linalg.norm(input, ord, dim, keepdim)
input_numpy = input.cpu().numpy()
msg = f'numpy does not raise error but pytorch does, for case "{test_case_info}"'
            with self.assertRaises(Exception, msg=msg):
np.linalg.norm(input_numpy, ord, dim, keepdim)
S = 10
error_test_cases = [
# input size, p settings, dim, error type, error regex
((S, ), ['fro', 'nuc'], None, RuntimeError, r'A must have at least 2 dimensions'),
((S, S), [3.5], None, RuntimeError, r'matrix_norm: Order 3.5 not supported'),
((S, S), [0], None, RuntimeError, r'matrix_norm: Order 0 not supported'),
((S, S), ['fail'], None, RuntimeError, r'matrix_norm: Order fail not supported'),
((S, S), ['fro', 'nuc'], 0, RuntimeError, r'matrix_norm: dim must be a 2-tuple'),
((S, S), ['fro', 'nuc', 2], (0, 0), RuntimeError, r'dims must be different'),
((S, S), ['fro', 'nuc', 2], (-1, 1), RuntimeError, r'dims must be different'),
((S, S), ['fro', 'nuc', 2], (0, 4), IndexError, r'Dimension out of range'),
((S, ), [0], (4, ), IndexError, r'Dimension out of range'),
((S, ), [None], (0, 0), RuntimeError, r'dim 0 appears multiple times'),
((S, S, S), [1], (0, 1, 2), RuntimeError, r"If dim is specified, it must be of length 1 or 2."),
((S, S, S), [1], None, RuntimeError, r"If dim is not specified but ord is, the input must be 1D or 2D"),
]
for keepdim in [True, False]:
for input_size, ord_settings, dim, error_type, error_regex in error_test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_settings:
run_error_test_case(input, ord, dim, keepdim, error_type, error_regex)
# Test complex number inputs for linalg.norm
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.cfloat, torch.cdouble)
@precisionOverride({torch.cfloat: 5e-4})
def test_norm_complex(self, device, dtype):
def gen_error_message(input_size, ord, keepdim, dim=None):
return "complex norm failed for input size %s, ord=%s, keepdim=%s, dim=%s" % (
input_size, ord, keepdim, dim)
vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf]
matrix_ords = [None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf]
# Test supported ords
for keepdim in [False, True]:
# vector norm
x = torch.randn(25, device=device, dtype=dtype)
xn = x.cpu().numpy()
for ord in vector_ords:
res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, ord, keepdims=keepdim)
msg = gen_error_message(x.size(), ord, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg, exact_dtype=False)
res_out = torch.tensor([], device=device, dtype=res.dtype)
torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
self.assertEqual(res_out.shape, expected.shape, msg=msg)
self.assertEqual(res_out, expected, msg=msg)
# matrix norm
x = torch.randn(25, 25, device=device, dtype=dtype)
xn = x.cpu().numpy()
for ord in matrix_ords:
res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, ord, keepdims=keepdim)
msg = gen_error_message(x.size(), ord, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg, exact_dtype=False)
res_out = torch.tensor([], device=device, dtype=res.dtype)
torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
self.assertEqual(res_out.shape, expected.shape, msg=msg)
self.assertEqual(res_out, expected, msg=msg)
    # Test that linalg.vector_norm gives the same result as NumPy when inputs
# contain extreme values (inf, -inf, nan)
def test_vector_norm_extreme_values(self, device):
vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
vectors = []
for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):
vectors.append(list(pair))
for vector in vectors:
x = torch.tensor(vector, device=device)
x_n = x.cpu().numpy()
for ord in vector_ords:
msg = f'ord={ord}, vector={vector}'
result = torch.linalg.vector_norm(x, ord=ord)
result_n = np.linalg.norm(x_n, ord=ord)
self.assertEqual(result, result_n, msg=msg)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-5})
def test_matrix_norm(self, device, dtype):
# Test only inputs for which torch.linalg.matrix_norm diverges from torch.linalg.norm
A = make_tensor((2, 2, 2), dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm:.*must have at least 2 dimensions.*'):
torch.linalg.matrix_norm(make_tensor((2,), dtype=dtype, device=device))
with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm:.*must be a 2-tuple.*'):
torch.linalg.matrix_norm(A, dim=(0,))
with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
torch.linalg.matrix_norm(A, ord=0)
with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
torch.linalg.matrix_norm(A, ord=3.0)
# Test dim=None behavior
ref = torch.linalg.norm(A, dim=(-2, -1))
res = torch.linalg.matrix_norm(A)
self.assertEqual(ref, res)
    # Test that linalg.norm gives the same result as NumPy when inputs
# contain extreme values (inf, -inf, nan)
@unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
@unittest.skipIf(IS_MACOS, "Skipped on MacOS!")
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_extreme_values(self, device):
vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
# matrix_ords 'nuc', 2, -2 are skipped currently
# See issue https://github.com/pytorch/pytorch/issues/71911
matrix_ords = ['fro', 1, inf, -1, -inf]
vectors = []
matrices = []
for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):
vectors.append(list(pair))
matrices.append([[pair[0], pair[1]]])
matrices.append([[pair[0]], [pair[1]]])
for vector in vectors:
x = torch.tensor(vector).to(device)
x_n = x.cpu().numpy()
for ord in vector_ords:
msg = f'ord={ord}, vector={vector}'
result = torch.linalg.norm(x, ord=ord)
result_n = np.linalg.norm(x_n, ord=ord)
self.assertEqual(result, result_n, msg=msg)
# TODO: Remove this function once the broken cases are fixed
def is_broken_matrix_norm_case(ord, x):
if self.device_type == 'cuda':
if x.size() == torch.Size([1, 2]):
if ord in ['nuc', 2, -2] and isnan(x[0][0]) and x[0][1] == 1:
# These cases are broken because of an issue with svd
# https://github.com/pytorch/pytorch/issues/43567
return True
if ord in ['nuc', 2, -2]:
# These cases are broken because of another issue with svd
# https://github.com/pytorch/pytorch/issues/52633
return True
return False
for matrix in matrices:
x = torch.tensor(matrix).to(device)
x_n = x.cpu().numpy()
for ord in matrix_ords:
msg = f'ord={ord}, matrix={matrix}'
if is_broken_matrix_norm_case(ord, x):
continue
else:
result_n = np.linalg.norm(x_n, ord=ord)
result = torch.linalg.norm(x, ord=ord)
self.assertEqual(result, result_n, msg=msg)
# Test degenerate shape results match numpy for linalg.norm vector norms
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@unittest.skipIf(TEST_WITH_ASAN, "Skipped on ASAN since it checks for undefined behavior.")
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_norm_vector_degenerate_shapes(self, device, dtype):
def run_test_case(input, ord, dim, keepdim):
msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
if (input.numel() == 0 and
(ord < 0. or ord == inf) and
(dim is None or input.shape[dim] == 0)):
with self.assertRaises(RuntimeError):
torch.linalg.norm(input, ord, dim, keepdim)
else:
input_numpy = input.cpu().numpy()
result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
result = torch.linalg.norm(input, ord, dim, keepdim)
self.assertEqual(result, result_numpy, msg=msg)
ord_vector = [0, 0.5, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
S = 10
test_cases = [
# input size, dim
((0, ), None),
((0, S), 0),
((0, S), 1),
((S, 0), 0),
((S, 0), 1),
]
for keepdim in [True, False]:
for input_size, dim in test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_vector:
run_test_case(input, ord, dim, keepdim)
# Test degenerate shape results match numpy for linalg.norm matrix norms
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_norm_matrix_degenerate_shapes(self, device, dtype):
def run_test_case(input, ord, dim, keepdim, should_error):
msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
input_numpy = input.cpu().numpy()
ops = [torch.linalg.norm]
if ord is not None and dim is not None:
ops.append(torch.linalg.matrix_norm)
if should_error:
with self.assertRaises(ValueError):
np.linalg.norm(input_numpy, ord, dim, keepdim)
for op in ops:
with self.assertRaises(IndexError):
op(input, ord, dim, keepdim)
else:
result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
for op in ops:
result = op(input, ord, dim, keepdim)
self.assertEqual(result, result_numpy, msg=msg)
ord_matrix = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf, None]
S = 10
test_cases = [
# input size, p settings that cause error, dim
((0, 0), [1, 2, inf, -1, -2, -inf], None),
((0, S), [2, inf, -2, -inf], None),
((S, 0), [1, 2, -1, -2], None),
((S, S, 0), [], (0, 1)),
((1, S, 0), [], (0, 1)),
((0, 0, S), [1, 2, inf, -1, -2, -inf], (0, 1)),
((0, 0, S), [1, 2, inf, -1, -2, -inf], (1, 0)),
]
for keepdim in [True, False]:
for input_size, error_ords, dim in test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_matrix:
run_test_case(input, ord, dim, keepdim, ord in error_ords)
def test_norm_fastpaths(self, device):
x = torch.randn(3, 5, device=device)
# slow path
result = torch.linalg.norm(x, 4.5, 1)
expected = torch.pow(x.abs().pow(4.5).sum(1), 1.0 / 4.5)
self.assertEqual(result, expected)
# fast 0-norm
result = torch.linalg.norm(x, 0, 1)
expected = (x != 0).type_as(x).sum(1)
self.assertEqual(result, expected)
# fast 1-norm
result = torch.linalg.norm(x, 1, 1)
expected = x.abs().sum(1)
self.assertEqual(result, expected)
# fast 2-norm
result = torch.linalg.norm(x, 2, 1)
expected = torch.sqrt(x.pow(2).sum(1))
self.assertEqual(result, expected)
# fast 3-norm
result = torch.linalg.norm(x, 3, 1)
expected = torch.pow(x.pow(3).abs().sum(1), 1.0 / 3.0)
self.assertEqual(result, expected)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
# NumPy computes only in float64 and complex128 precisions
# for float32 or complex64 results might be very different from float64 or complex128
@dtypes(torch.float64, torch.complex128)
def test_eig_numpy(self, device, dtype):
def run_test(shape, *, symmetric=False):
from torch.testing._internal.common_utils import random_symmetric_matrix
if not dtype.is_complex and symmetric:
# for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
# unlike NumPy the result is not cast to float32 or float64 dtype in this case
a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
else:
a = make_tensor(shape, dtype=dtype, device=device)
actual = torch.linalg.eig(a)
# compare with NumPy
# the eigenvalues are not necessarily ordered
# so order of NumPy and PyTorch can be different
expected = np.linalg.eig(a.cpu().numpy())
# sort NumPy output
ind = np.argsort(expected[0], axis=-1)[::-1]
expected = (np.take_along_axis(expected[0], ind, axis=-1), np.take_along_axis(expected[1], ind[:, None], axis=-1))
# sort PyTorch output
# torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
# RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
# RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
ind = np.argsort(actual[0].cpu().numpy(), axis=-1)[::-1]
actual_np = [x.cpu().numpy() for x in actual]
sorted_actual = (
np.take_along_axis(actual_np[0], ind, axis=-1),
np.take_along_axis(actual_np[1], ind[:, None], axis=-1))
self.assertEqual(expected[0], sorted_actual[0], exact_dtype=False)
self.assertEqual(abs(expected[1]), abs(sorted_actual[1]), exact_dtype=False)
shapes = [(0, 0), # Empty matrix
(5, 5), # Single matrix
(0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
(2, 5, 5), # 3-dim tensors
(2, 1, 5, 5)] # 4-dim tensors
for shape in shapes:
run_test(shape)
run_test(shape, symmetric=True)
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_compare_backends(self, device, dtype):
def run_test(shape, *, symmetric=False):
from torch.testing._internal.common_utils import random_symmetric_matrix
if not dtype.is_complex and symmetric:
# for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
else:
a = make_tensor(shape, dtype=dtype, device=device)
actual = torch.linalg.eig(a)
complementary_device = 'cpu'
# compare with CPU
expected = torch.linalg.eig(a.to(complementary_device))
self.assertEqual(expected[0], actual[0])
self.assertEqual(expected[1], actual[1])
shapes = [(0, 0), # Empty matrix
(5, 5), # Single matrix
(0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
(2, 5, 5), # 3-dim tensors
(2, 1, 5, 5)] # 4-dim tensors
for shape in shapes:
run_test(shape)
run_test(shape, symmetric=True)
@slowTest
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(torch.float32)
def test_eig_check_magma(self, device, dtype):
        # For CUDA inputs, only matrices larger than 2048x2048 actually dispatch to the MAGMA library
shape = (2049, 2049)
a = make_tensor(shape, dtype=dtype, device=device)
w, v = torch.linalg.eig(a)
# check correctness using eigendecomposition identity
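        # A @ V = V @ diag(w); broadcasting w over the last dimension of v scales each eigenvector
        # column by its eigenvalue, so `w * v` is equivalent to V @ diag(w)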
self.assertEqual(a.to(v.dtype) @ v, w * v, atol=1e-3, rtol=1e-3)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eig_errors_and_warnings(self, device, dtype):
# eig requires the input to be at least 2 dimensional tensor
a = make_tensor(2, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
torch.linalg.eig(a)
# eig requires a square matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.linalg.eig(a)
# if out tensor with floating dtype is passed for complex output an error is thrown
if not dtype.is_complex:
# The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
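            # tr(A) = 3 + (-1) = 2 and det(A) = 3*(-1) - (-2)*4 = 5, so p(λ) = λ^2 - tr(A)*λ + det(A);
            # the discriminant 4 - 20 < 0 gives complex roots, so a real out= dtype cannot hold the result.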
a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
out0 = torch.empty(0, device=device, dtype=dtype)
out1 = torch.empty(0, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
torch.linalg.eig(a, out=(out0, out1))
out0 = torch.empty(0, device=device, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, "Expected eigenvectors to be safely castable"):
torch.linalg.eig(a, out=(out0, out1))
# dtypes should be safely castable
a = make_tensor((3, 3), dtype=dtype, device=device)
out0 = torch.empty(0, dtype=torch.int, device=device)
out1 = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
torch.linalg.eig(a, out=(out0, out1))
out0 = torch.empty(0, dtype=torch.complex128, device=device)
with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
torch.linalg.eig(a, out=(out0, out1))
# if non-empty out tensor with wrong shape is passed a warning is given
a = make_tensor((3, 3), dtype=dtype, device=device)
out0 = torch.empty(1, device=device, dtype=torch.complex128)
out1 = torch.empty(1, device=device, dtype=torch.complex128)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.eig(a, out=(out0, out1))
# Check warning occurs
self.assertEqual(len(w), 2)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
out_v = torch.empty(0, device=device, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.eig(a, out=(out_w, out_v))
out_w = torch.empty(0, device=device, dtype=torch.complex128)
out_v = torch.empty(0, device=wrong_device, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.eig(a, out=(out_w, out_v))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_with_nan(self, device, dtype):
for val in [np.inf, np.nan]:
for batch_dim in [(), (10,)]:
a = make_tensor((*batch_dim, 5, 5), device=device, dtype=dtype)
a[..., -1, -1] = val
with self.assertRaisesRegex(RuntimeError, "torch.linalg.eig: input tensor should not"):
torch.linalg.eig(a)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
# NumPy computes only in float64 and complex128 precisions
# for float32 or complex64 results might be very different from float64 or complex128
@dtypes(torch.float64, torch.complex128)
def test_eigvals_numpy(self, device, dtype):
def run_test(shape, *, symmetric=False):
from torch.testing._internal.common_utils import random_symmetric_matrix
if not dtype.is_complex and symmetric:
# for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
# unlike NumPy the result is not cast to float32 or float64 dtype in this case
a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
else:
a = make_tensor(shape, dtype=dtype, device=device)
actual = torch.linalg.eigvals(a)
# compare with NumPy
# the eigenvalues are not necessarily ordered
# so order of NumPy and PyTorch can be different
expected = np.linalg.eigvals(a.cpu().numpy())
# sort NumPy output
ind = np.argsort(expected, axis=-1)[::-1]
expected = np.take_along_axis(expected, ind, axis=-1)
# sort PyTorch output
# torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
# RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
# RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
ind = np.argsort(actual.cpu().numpy(), axis=-1)[::-1]
actual_np = actual.cpu().numpy()
sorted_actual = np.take_along_axis(actual_np, ind, axis=-1)
self.assertEqual(expected, sorted_actual, exact_dtype=False)
shapes = [(0, 0), # Empty matrix
(5, 5), # Single matrix
(0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
(2, 5, 5), # 3-dim tensors
(2, 1, 5, 5)] # 4-dim tensors
for shape in shapes:
run_test(shape)
run_test(shape, symmetric=True)
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eigvals_compare_backends(self, device, dtype):
def run_test(shape, *, symmetric=False):
from torch.testing._internal.common_utils import random_symmetric_matrix
if not dtype.is_complex and symmetric:
# for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
else:
a = make_tensor(shape, dtype=dtype, device=device)
actual = torch.linalg.eigvals(a)
complementary_device = 'cpu'
# compare with CPU
expected = torch.linalg.eigvals(a.to(complementary_device))
self.assertEqual(expected, actual)
# check out= variant
complex_dtype = dtype
if not dtype.is_complex:
complex_dtype = torch.complex128 if dtype == torch.float64 else torch.complex64
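            # eigvals always returns complex eigenvalues, so the out= tensor for a real
            # input dtype must use the matching complex dtype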
out = torch.empty(0, dtype=complex_dtype, device=device)
ans = torch.linalg.eigvals(a, out=out)
self.assertEqual(ans, out)
self.assertEqual(expected.to(complex_dtype), out)
# check non-contiguous out
if a.numel() > 0:
out = torch.empty(2 * shape[0], *shape[1:-1], dtype=complex_dtype, device=device)[::2]
self.assertFalse(out.is_contiguous())
ans = torch.linalg.eigvals(a, out=out)
self.assertEqual(ans, out)
self.assertEqual(expected.to(complex_dtype), out)
shapes = [(0, 0), # Empty matrix
(5, 5), # Single matrix
(0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
(2, 5, 5), # 3-dim tensors
(2, 1, 5, 5)] # 4-dim tensors
for shape in shapes:
run_test(shape)
run_test(shape, symmetric=True)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eigvals_errors_and_warnings(self, device, dtype):
# eig requires the input to be at least 2 dimensional tensor
a = make_tensor(2, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
torch.linalg.eigvals(a)
# eig requires a square matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.linalg.eigvals(a)
# if out tensor with floating dtype is passed for complex output an error is thrown
if not dtype.is_complex:
# The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
out = torch.empty(0, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
torch.linalg.eigvals(a, out=out)
# dtypes should be safely castable
a = make_tensor((3, 3), dtype=dtype, device=device)
out = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
torch.linalg.eigvals(a, out=out)
# if non-empty out tensor with wrong shape is passed a warning is given
out = torch.empty(1, device=device, dtype=torch.complex128)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.eigvals(a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.eigvals(a, out=out_w)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_old(self, device):
def gen_error_message(input_size, p, keepdim, dim=None):
return "norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
input_size, p, keepdim, dim)
for keepdim in [False, True]:
# full reduction
x = torch.randn(25, device=device)
xn = x.cpu().numpy()
for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3, 1.5]:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
self.assertEqual(res, expected, atol=1e-5, rtol=0, msg=gen_error_message(x.size(), p, keepdim))
# one dimension
x = torch.randn(25, 25, device=device)
xn = x.cpu().numpy()
for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3]:
dim = 1
res = x.norm(p, dim, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, dim, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim, dim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# matrix norm
for p in ['fro', 'nuc']:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# zero dimensions
x = torch.randn((), device=device)
xn = x.cpu().numpy()
res = x.norm(keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, keepdims=keepdim)
msg = gen_error_message(x.size(), None, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# larger tensor sanity check
self.assertEqual(
2 * torch.norm(torch.ones(10000), keepdim=keepdim),
torch.norm(torch.ones(40000), keepdim=keepdim))
# matrix norm with non-square >2-D tensors, all combinations of reduction dims
x = torch.randn(5, 6, 7, 8, device=device)
xn = x.cpu().numpy()
for p in ['fro', 'nuc']:
for dim in itertools.product(*[list(range(4))] * 2):
if dim[0] == dim[1]:
continue
res = x.norm(p=p, dim=dim, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, ord=p, axis=dim, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim, dim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# Test that torch.norm with p=+/-inf propagates NaN
def test_norm_old_nan_propagation(self, device):
ords = [inf, -inf]
for pair in itertools.product([0.0, nan, 1.0], repeat=2):
x = torch.tensor(list(pair), device=device)
for ord in ords:
result = torch.norm(x, p=ord)
result_check = torch.linalg.norm(x, ord=ord)
self.assertEqual(result, result_check)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_complex_old(self, device):
def gen_error_message(input_size, p, keepdim, dim=None):
return "complex norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
input_size, p, keepdim, dim)
for keepdim in [False, True]:
# vector norm
x = torch.randn(25, device=device) + 1j * torch.randn(25, device=device)
xn = x.cpu().numpy()
for p in [0, 1, 2, 3, inf, -1, -2, -3, -inf]:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# matrix norm
x = torch.randn(25, 25, device=device) + 1j * torch.randn(25, 25, device=device)
xn = x.cpu().numpy()
for p in ['nuc', 'fro']:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg, rtol=4e-6, atol=6e-4)
# Ensure torch.norm with p='fro' and p=2 give the same results for mutually supported input combinations
@dtypes(torch.float)
def test_norm_fro_2_equivalence_old(self, device, dtype):
input_sizes = [
(0,),
(10,),
(0, 0),
(4, 30),
(0, 45),
(100, 0),
(45, 10, 23),
(0, 23, 59),
(23, 0, 37),
(34, 58, 0),
(0, 0, 348),
(0, 3434, 0),
(0, 0, 0),
(5, 3, 8, 1, 3, 5)]
for input_size in input_sizes:
a = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
# Try full reduction
dim_settings = [None]
# Try all possible 1-D reductions
dim_settings += list(range(-a.dim(), a.dim()))
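            # wrap_dim maps a possibly negative dim to its non-negative equivalent so that
            # duplicate dims can be filtered out of the 2-D reduction candidates below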
def wrap_dim(dim, ndims):
assert (dim < ndims) and (dim >= -ndims)
if dim >= 0:
return dim
else:
return dim + ndims
# Try all possible 2-D reductions
dim_settings += [
(d0, d1) for d0, d1 in itertools.combinations(range(-a.dim(), a.dim()), 2)
if wrap_dim(d0, a.dim()) != wrap_dim(d1, a.dim())]
for dim in dim_settings:
for keepdim in [True, False]:
a_norm_2 = torch.norm(a, p=2, dim=dim, keepdim=keepdim)
a_norm_fro = torch.norm(a, p='fro', dim=dim, keepdim=keepdim)
self.assertEqual(a_norm_fro, a_norm_2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_nuclear_norm_axes_small_brute_force_old(self, device):
def check_single_nuclear_norm(x, axes):
if self.device_type != 'cpu' and randrange(100) < 95:
return # too many cpu <==> device copies
a = np.array(x.cpu(), copy=False)
expected = np.linalg.norm(a, "nuc", axis=axes)
ans = torch.norm(x, "nuc", dim=axes)
self.assertTrue(ans.is_contiguous())
self.assertEqual(ans.shape, expected.shape)
self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)
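            # also exercise the out= variant: norm should write into `out` and return it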
out = torch.zeros(expected.shape, dtype=x.dtype, device=x.device)
ans = torch.norm(x, "nuc", dim=axes, out=out)
self.assertIs(ans, out)
self.assertTrue(ans.is_contiguous())
self.assertEqual(ans.shape, expected.shape)
self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)
for n in range(1, 3):
for m in range(1, 3):
for axes in itertools.permutations([0, 1], 2):
# 2d, inner dimensions C
x = torch.randn(n, m, device=device)
check_single_nuclear_norm(x, axes)
# 2d, inner dimensions Fortran
x = torch.randn(m, n, device=device).mT
check_single_nuclear_norm(x, axes)
# 2d, inner dimensions non-contiguous
x = torch.randn(n, 2 * m, device=device)[:, ::2]
check_single_nuclear_norm(x, axes)
# 2d, all dimensions non-contiguous
x = torch.randn(7 * n, 2 * m, device=device)[::7, ::2]
check_single_nuclear_norm(x, axes)
for o in range(1, 3):
for axes in itertools.permutations([0, 1, 2], 2):
# 3d, inner dimensions C
x = torch.randn(o, n, m, device=device)
check_single_nuclear_norm(x, axes)
# 3d, inner dimensions Fortran
x = torch.randn(o, m, n, device=device).mT
check_single_nuclear_norm(x, axes)
# 3d, inner dimensions non-contiguous
x = torch.randn(o, n, 2 * m, device=device)[:, :, ::2]
check_single_nuclear_norm(x, axes)
# 3d, all dimensions non-contiguous
x = torch.randn(7 * o, 5 * n, 2 * m, device=device)[::7, ::5, ::2]
check_single_nuclear_norm(x, axes)
for r in range(1, 3):
for axes in itertools.permutations([0, 1, 2, 3], 2):
# 4d, inner dimensions C
x = torch.randn(r, o, n, m, device=device)
check_single_nuclear_norm(x, axes)
# 4d, inner dimensions Fortran
x = torch.randn(r, o, n, m, device=device).mT
check_single_nuclear_norm(x, axes)
# 4d, inner dimensions non-contiguous
x = torch.randn(r, o, n, 2 * m, device=device)[:, :, :, ::2]
check_single_nuclear_norm(x, axes)
# 4d, all dimensions non-contiguous
x = torch.randn(7 * r, 5 * o, 11 * n, 2 * m, device=device)[::7, ::5, ::11, ::2]
check_single_nuclear_norm(x, axes)
@skipCUDAIfNoMagma
def test_nuclear_norm_exceptions_old(self, device):
for lst in [], [1], [1, 2]:
x = torch.tensor(lst, dtype=torch.double, device=device)
for axes in (), (0,):
self.assertRaises(RuntimeError, torch.norm, x, "nuc", axes)
self.assertRaises(RuntimeError, torch.norm, x, "nuc", (0, 1))
x = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.double, device=device)
self.assertRaisesRegex(RuntimeError, "must be different", torch.norm, x, "nuc", (0, 0))
self.assertRaisesRegex(IndexError, "Dimension out of range", torch.norm, x, "nuc", (0, 2))
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_svd_lowrank(self, device, dtype):
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
assert batches == ()
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
            q = min(rows, columns)
u, s, v = svd_lowrank(a_input, q=q, **options)
# check if u, s, v is a SVD
u, s, v = u[..., :q], s[..., :q], v[..., :q]
A = u.matmul(s.diag_embed()).matmul(v.mT)
self.assertEqual(A, a, rtol=1e-7, atol=2e-7)
# check if svd_lowrank produces same singular values as torch.svd
U, S, V = torch.svd(a)
self.assertEqual(s.shape, S.shape)
self.assertEqual(u.shape, U.shape)
self.assertEqual(v.shape, V.shape)
self.assertEqual(s, S)
if density == 1:
# actual_rank is known only for dense inputs
#
# check if pairs (u, U) and (v, V) span the same
# subspaces, respectively
u, s, v = u[..., :actual_rank], s[..., :actual_rank], v[..., :actual_rank]
U, S, V = U[..., :actual_rank], S[..., :actual_rank], V[..., :actual_rank]
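                # u and U both have orthonormal columns; if they span the same subspace,
                # then u^H @ U is unitary and hence has |det| == 1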
self.assertEqual(u.mT.matmul(U).det().abs(), torch.ones(batches, device=device, dtype=dtype))
self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
(10, (100, 40), all_batches),
(7, (1000, 1000), [()]),
]:
# dense input
for batches in all_batches:
run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
if size != size[::-1]:
run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)
# sparse input
for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
for density in [0.005, 0.1]:
run_subtest(None, size, (), device, torch.svd_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.svd_lowrank)
actual_rank, size, batches = 2, (17, 4), ()
run_subtest(actual_rank, size, batches, device, jitted)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@precisionOverride({torch.float: 1e-4, torch.cfloat: 2e-4})
@setLinalgBackendsToDefaultFinally
@dtypes(*floating_and_complex_types())
def test_svd(self, device, dtype):
# tests linalg.svd, svd, linalg.svdvals
make_arg = partial(make_tensor, dtype=dtype, device=device)
backends = ["default"]
if torch.device(device).type == 'cuda':
if torch.cuda.has_magma:
backends.append("magma")
if has_cusolver():
backends.append("cusolver")
ns = (12, 4, 2, 0)
batches = ((), (0,), (1,), (2,), (2, 1), (0, 2))
drivers = (None, 'gesvd', 'gesvdj', 'gesvda')
for backend in backends:
torch.backends.cuda.preferred_linalg_library(backend)
for batch, m, n, driver in product(batches, ns, ns, drivers):
if not (backend == 'cusolver' or driver is None):
                    # only test the cases below and skip otherwise:
# - backend == 'cusolver' (driver can be anything)
# - backend != 'cusolver' (driver should only be None)
continue
shape = batch + (m, n)
k = min(m, n)
A = make_arg(shape)
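                # reduced SVD: A should be reconstructed as U @ diag(S) @ Vh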
U, S, Vh = torch.linalg.svd(A, full_matrices=False, driver=driver)
self.assertEqual((U @ S.to(A.dtype).diag_embed()) @ Vh, A)
U_f, S_f, Vh_f = torch.linalg.svd(A, full_matrices=True, driver=driver)
self.assertEqual(S_f, S)
self.assertEqual((U_f[..., :k] @ S_f.to(A.dtype).diag_embed()) @ Vh_f[..., :k, :], A)
S_s = torch.linalg.svdvals(A, driver=driver)
self.assertEqual(S_s, S)
U, S, V = torch.svd(A, some=True)
self.assertEqual((U @ S.to(A.dtype).diag_embed()) @ V.mH, A)
U_f, S_f, V_f = torch.svd(A, some=False)
self.assertEqual(S_f, S)
self.assertEqual((U_f[..., :k] @ S_f.to(A.dtype).diag_embed()) @ V_f[..., :k].mH, A)
S_s = torch.svd(A, compute_uv=False).S
self.assertEqual(S_s, S)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.complex128)
def test_invariance_error_spectral_decompositions(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=True)
A = make_arg((3, 3))
with self.assertRaisesRegex(RuntimeError, "ill-defined"):
U, _, Vh = torch.linalg.svd(A, full_matrices=False)
(U + Vh).sum().backward()
A = make_arg((3, 3))
with self.assertRaisesRegex(RuntimeError, "ill-defined"):
V = torch.linalg.eig(A).eigenvectors
V.sum().backward()
A = make_arg((3, 3))
A = A + A.mH
with self.assertRaisesRegex(RuntimeError, "ill-defined"):
Q = torch.linalg.eigh(A).eigenvectors
Q.sum().backward()
@skipCUDAIfNoCusolver # MAGMA backend doesn't work in this case
@skipCUDAIfRocm
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_svd_memory_allocation(self, device, dtype):
# test for https://github.com/pytorch/pytorch/issues/61949
# the problem was that tensors of incorrect size were allocated and then narrowed
m = 3
n = 2**20
a = make_tensor((m, n), dtype=dtype, device=device)
# the following should run without errors
S = torch.linalg.svdvals(a)
result = torch.linalg.svd(a, full_matrices=False)
self.assertEqual(result.S, S)
def cholesky_solve_test_helper(self, A_dims, b_dims, upper, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = random_hermitian_pd_matrix(*A_dims, dtype=dtype, device=device)
L = torch.cholesky(A, upper=upper)
return b, A, L
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve(self, device, dtype):
for (k, n), upper in itertools.product(zip([2, 3, 5], [3, 5, 7]), [True, False]):
b, A, L = self.cholesky_solve_test_helper((n,), (n, k), upper, device, dtype)
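            # given only the Cholesky factor L of A, cholesky_solve should return x with A @ x == b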
x = torch.cholesky_solve(b, L, upper=upper)
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched(self, device, dtype):
def cholesky_solve_batch_helper(A_dims, b_dims, upper):
b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
x_exp_list = []
for i in range(b_dims[0]):
x_exp_list.append(torch.cholesky_solve(b[i], L[i], upper=upper))
x_exp = torch.stack(x_exp_list) # Stacked output
x_act = torch.cholesky_solve(b, L, upper=upper) # Actual output
self.assertEqual(x_act, x_exp) # Equality check
Ax = np.matmul(A.cpu(), x_act.cpu())
self.assertEqual(b, Ax) # Correctness check
for upper, batchsize in itertools.product([True, False], [1, 3, 4]):
cholesky_solve_batch_helper((5, batchsize), (batchsize, 5, 10), upper)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched_many_batches(self, device, dtype):
for A_dims, b_dims in zip([(5, 256, 256), (5,)], [(5, 10), (512, 512, 5, 10)]):
for upper in [True, False]:
b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
x = torch.cholesky_solve(b, L, upper)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched_broadcasting(self, device, dtype):
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(A_dims, b_dims, upper):
A_matrix_size = A_dims[-1]
A_batch_dims = A_dims[:-2]
A = random_hermitian_pd_matrix(A_matrix_size, *A_batch_dims,
dtype=dtype, device='cpu')
b = torch.randn(*b_dims, dtype=dtype, device='cpu')
x_exp = torch.tensor(solve(A.numpy(), b.numpy()), dtype=dtype, device=device)
A, b = A.to(dtype=dtype, device=device), b.to(dtype=dtype, device=device)
L = torch.linalg.cholesky(A, upper=upper)
x = torch.cholesky_solve(b, L, upper=upper)
self.assertEqual(x, x_exp)
# https://github.com/pytorch/pytorch/issues/42695
x = torch.cholesky_solve(b, L, upper=upper, out=x)
self.assertEqual(x, x_exp)
# test against numpy.linalg.solve
for upper in [True, False]:
run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), upper) # no broadcasting
run_test((2, 1, 3, 4, 4), (4, 6), upper) # broadcasting b
run_test((4, 4), (2, 1, 3, 4, 2), upper) # broadcasting A
run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), upper) # broadcasting A & b
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_solve_out_errors_and_warnings(self, device, dtype):
# dtypes should be safely castable
a = torch.eye(2, dtype=dtype, device=device)
b = torch.randn(2, 1, dtype=dtype, device=device)
out = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.cholesky_solve(b, a, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.cholesky_solve(b, a, out=out)
# if out tensor with wrong shape is passed a warning is given
with warnings.catch_warnings(record=True) as w:
out = torch.empty(1, dtype=dtype, device=device)
# Trigger warning
torch.cholesky_solve(b, a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_inverse(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, device=device, dtype=dtype)
def run_test(torch_inverse, matrix, batches, n):
matrix_inverse = torch_inverse(matrix)
# Compare against NumPy output
            # NumPy uses the 'gesv' LAPACK routine, solving the equation A @ A_inv = I,
            # while PyTorch uses 'getrf' + 'getrs'. As such, there may be some element-wise differences
expected = np.linalg.inv(matrix.cpu().numpy())
self.assertEqual(matrix_inverse, expected, atol=self.precision, rtol=self.precision)
# Additional correctness tests, check matrix*matrix_inverse == identity
identity = torch.eye(n, dtype=dtype, device=device)
self.assertEqual(identity.expand_as(matrix), np.matmul(matrix.cpu(), matrix_inverse.cpu()))
self.assertEqual(identity.expand_as(matrix), np.matmul(matrix_inverse.cpu(), matrix.cpu()))
# check the out= variant
# prepare the expected out tensor
matrix_inverse_out = torch.empty(*batches, n, n, dtype=dtype, device=device)
matrix_inverse_out_t = matrix_inverse_out.mT.clone(memory_format=torch.contiguous_format)
matrix_inverse_out = matrix_inverse_out_t.mT
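            # the out tensor is now batched column-major (its transpose is contiguous),
            # which exercises the out= path with a Fortran-style layout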
ans = torch_inverse(matrix, out=matrix_inverse_out)
self.assertEqual(matrix_inverse_out, ans, atol=0, rtol=0)
self.assertEqual(matrix_inverse_out, matrix_inverse, atol=0, rtol=0)
# batched matrices: 3+ dimensional tensors, check matrix_inverse same as single-inverse for each matrix
if matrix.ndim > 2 and batches[0] != 0:
expected_inv_list = []
p = int(np.prod(batches)) # use `p` instead of -1, so that the test works for empty input as well
for mat in matrix.contiguous().view(p, n, n):
expected_inv_list.append(torch_inverse(mat))
expected_inv = torch.stack(expected_inv_list).view(*batches, n, n)
if self.device_type == 'cuda' and dtype in [torch.float32, torch.complex64]:
# single-inverse is done using cuSOLVER, while batched inverse is done using MAGMA
# individual values can be significantly different for fp32, hence rather high rtol is used
# the important thing is that torch_inverse passes above checks with identity
self.assertEqual(matrix_inverse, expected_inv, atol=1e-1, rtol=1e-2)
else:
self.assertEqual(matrix_inverse, expected_inv)
# helper function for testing torch.linalg.inv_ex
def test_inv_ex(input, out=None):
if out is not None:
info = torch.empty(0, dtype=torch.int32, device=device)
return torch.linalg.inv_ex(input, out=(out, info)).inverse
return torch.linalg.inv_ex(input).inverse
for torch_inverse in [torch.inverse, torch.linalg.inv, test_inv_ex]:
for batches, n in itertools.product(
[[], [0], [2], [2, 1]],
[0, 5]
):
matrices = make_arg(*batches, n, n)
run_test(torch_inverse, matrices, batches, n)
# test non-contiguous input
run_test(torch_inverse, matrices.mT, batches, n)
if n > 0:
run_test(
torch_inverse,
make_arg(*batches, 2 * n, 2 * n)
.view(-1, n * 2, n * 2)[:, ::2, ::2].view(*batches, n, n),
batches, n
)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_inv_ex_info_device(self, device, dtype):
A = torch.eye(3, 3, dtype=dtype, device=device)
info = torch.linalg.inv_ex(A).info
self.assertTrue(info.device == A.device)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_inv_ex_singular(self, device, dtype):
# if the input matrix is not invertible, info with positive integer is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A[-1, -1] = 0 # Now A is singular
info = torch.linalg.inv_ex(A).info
self.assertEqual(info, 3)
with self.assertRaisesRegex(torch.linalg.LinAlgError,
r'diagonal element 3 is zero, the inversion could not be completed'):
torch.linalg.inv_ex(A, check_errors=True)
# if at least one matrix in the batch is not positive definite,
# batched info with positive integer for the corresponding matrix is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A = A.reshape((1, 3, 3))
A = A.repeat(5, 1, 1)
A[3, -2, -2] = 0 # Now A[3] is singular
info = torch.linalg.inv_ex(A).info
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
expected_info[3] = 2
self.assertEqual(info, expected_info)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 3\): The diagonal element 2 is zero'):
torch.linalg.inv_ex(A, check_errors=True)
@slowTest
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@skipCUDAIfRocm
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
torch.float64: 1e-5, torch.complex128: 1e-5})
def test_inverse_many_batches(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, device=device, dtype=dtype)
def test_inverse_many_batches_helper(torch_inverse, b, n):
matrices = make_arg(b, n, n)
matrices_inverse = torch_inverse(matrices)
# Compare against NumPy output
expected = np.linalg.inv(matrices.cpu().numpy())
self.assertEqual(matrices_inverse, expected, atol=self.precision, rtol=1e-3)
for torch_inverse in [torch.inverse, torch.linalg.inv]:
test_inverse_many_batches_helper(torch_inverse, 5, 256)
test_inverse_many_batches_helper(torch_inverse, 3, 512)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes # TODO: XLA doesn't raise exception
@dtypes(*floating_and_complex_types())
def test_inverse_errors(self, device, dtype):
# inverse expects batches of square matrices as input
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.inverse(torch.randn(2, 3, 4, 3))
# if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
def run_test_singular_input(batch_dim, n):
x = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
x[n, -1, -1] = 0
with self.assertRaisesRegex(torch.linalg.LinAlgError, rf'\(Batch element {n}\): The diagonal element 3 is zero'):
torch.inverse(x)
for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
run_test_singular_input(*params)
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Test fails for float64 on GPU (P100, V100) on Meta infra")
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes # TODO: XLA doesn't raise exception
@skipCUDAIfRocm
@skipCUDAVersionIn([(11, 3), (11, 6), (11, 7)]) # https://github.com/pytorch/pytorch/issues/57482
@dtypes(*floating_and_complex_types())
def test_inverse_errors_large(self, device, dtype):
# Test batched inverse of singular matrices reports errors without crashing (gh-51930)
x = torch.empty((8, 10, 616, 616), dtype=dtype, device=device)
x[:] = torch.eye(616, dtype=dtype, device=device)
x[..., 10, 10] = 0
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 0\): The diagonal element 11 is zero'):
torch.inverse(x)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3, torch.float64: 1e-7, torch.complex128: 1e-7})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_pinv(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test_main(A, hermitian):
# Testing against definition for pseudo-inverses
A_pinv = torch.linalg.pinv(A, hermitian=hermitian)
np_A = A.cpu().numpy()
np_A_pinv = A_pinv.cpu().numpy()
if A.numel() > 0:
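                # Moore-Penrose conditions: A @ A+ @ A == A, A+ @ A @ A+ == A+,
                # and both A @ A+ and A+ @ A are Hermitian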
self.assertEqual(A, np_A @ np_A_pinv @ np_A, atol=self.precision, rtol=self.precision)
self.assertEqual(A_pinv, np_A_pinv @ np_A @ np_A_pinv, atol=self.precision, rtol=self.precision)
self.assertEqual(np_A @ np_A_pinv, (np_A @ np_A_pinv).conj().swapaxes(-2, -1))
self.assertEqual(np_A_pinv @ np_A, (np_A_pinv @ np_A).conj().swapaxes(-2, -1))
else:
self.assertEqual(A.shape, A_pinv.shape[:-2] + (A_pinv.shape[-1], A_pinv.shape[-2]))
# Check out= variant
out = torch.empty_like(A_pinv)
ans = torch.linalg.pinv(A, hermitian=hermitian, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, A_pinv)
def run_test_numpy(A, hermitian):
# Check against NumPy output
# Test float rcond, and specific value for each matrix
rconds = [float(torch.rand(1)), ]
# Test different types of rcond tensor
for rcond_type in all_types():
rconds.append(torch.rand(A.shape[:-2], dtype=torch.double, device=device).to(rcond_type))
# Test broadcasting of rcond
if A.ndim > 2:
rconds.append(torch.rand(A.shape[-3], device=device))
for rcond in rconds:
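                # rcond is the NumPy-compatible alias for rtol, so both spellings must agree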
actual = torch.linalg.pinv(A, rcond=rcond, hermitian=hermitian)
torch_rtol = torch.linalg.pinv(A, rtol=rcond, hermitian=hermitian)
self.assertEqual(actual, torch_rtol)
numpy_rcond = rcond if isinstance(rcond, float) else rcond.cpu().numpy()
expected = np.linalg.pinv(A.cpu().numpy(), rcond=numpy_rcond, hermitian=hermitian)
self.assertEqual(actual, expected, atol=self.precision, rtol=1e-5)
for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5), # square matrices
(3, 2), (5, 3, 2), (2, 5, 3, 2), # fat matrices
(2, 3), (5, 2, 3), (2, 5, 2, 3), # thin matrices
(0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]: # zero numel matrices
A = torch.randn(*sizes, dtype=dtype, device=device)
hermitian = False
run_test_main(A, hermitian)
run_test_numpy(A, hermitian)
# Check hermitian = True
for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5), # square matrices
(0, 0), (3, 0, 0), ]: # zero numel square matrices
A = random_hermitian_pd_matrix(sizes[-1], *sizes[:-2], dtype=dtype, device=device)
hermitian = True
run_test_main(A, hermitian)
run_test_numpy(A, hermitian)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_pinv_errors_and_warnings(self, device, dtype):
# pinv requires at least 2D tensor
a = torch.randn(1, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "expected a tensor with 2 or more dimensions"):
torch.linalg.pinv(a)
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.randn(3, 3, dtype=dtype, device=device)
out = torch.empty(7, 7, dtype=dtype, device=device)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.pinv(a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes of out and input should be safely castable
out = torch.empty_like(a).to(torch.int)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.linalg.pinv(a, out=out)
if torch.cuda.is_available():
# device of out and input should match
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty_like(a).to(wrong_device)
with self.assertRaisesRegex(RuntimeError, "Expected result and input tensors to be on the same device"):
torch.linalg.pinv(a, out=out)
# device of rcond and input should match
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
rcond = torch.full((), 1e-2, device=wrong_device)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.linalg.pinv(a, rcond=rcond)
# rcond can't be complex
rcond = torch.full((), 1j, device=device)
with self.assertRaisesRegex(RuntimeError, "rcond tensor of complex type is not supported"):
torch.linalg.pinv(a, rcond=rcond)
# atol can't be complex
atol = torch.full((), 1j, device=device)
with self.assertRaisesRegex(RuntimeError, "atol tensor of complex type is not supported"):
torch.linalg.pinv(a, atol=atol)
# rtol can't be complex
rtol = torch.full((), 1j, device=device)
with self.assertRaisesRegex(RuntimeError, "rtol tensor of complex type is not supported"):
torch.linalg.pinv(a, rtol=rtol)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_inv_errors_and_warnings(self, device, dtype):
# inv expects batches of square matrices as input
a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.linalg.inv(a)
        # inv requires the input to be at least a 2-dimensional tensor
a = torch.randn(2, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
torch.linalg.inv(a)
# if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
def run_test_singular_input(batch_dim, n):
a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
a[n, -1, -1] = 0
with self.assertRaisesRegex(torch.linalg.LinAlgError, rf"\(Batch element {n}\): The diagonal element 3 is zero"):
torch.linalg.inv(a)
for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
run_test_singular_input(*params)
# dtypes should match
a = torch.eye(2, dtype=dtype, device=device)
out = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got int instead"):
torch.linalg.inv(a, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, device=wrong_device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.inv(a, out=out)
# if out tensor with wrong shape is passed a warning is given
with warnings.catch_warnings(record=True) as w:
a = torch.eye(2, dtype=dtype, device=device)
out = torch.empty(1, dtype=dtype, device=device)
# Trigger warning
torch.linalg.inv(a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # if the out tensor is in batched column-major format but has the wrong shape, a warning is given
with warnings.catch_warnings(record=True) as w:
a = torch.eye(2, dtype=dtype, device=device)
out = torch.empty(3, 3, dtype=dtype, device=device)
out = out.mT.clone(memory_format=torch.contiguous_format)
out = out.mT
self.assertTrue(out.mT.is_contiguous())
# Trigger warning
torch.linalg.inv(a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
def solve_test_helper(self, A_dims, b_dims, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_A = partial(make_fullrank, device=device, dtype=dtype)
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = make_A(*A_dims)
return b, A
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
def test_solve(self, device, dtype):
def run_test(n, batch, rhs):
A_dims = (*batch, n, n)
b_dims = (*batch, n, *rhs)
b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
# Correctness test
x = torch.linalg.solve(A, b)
if rhs == ():
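                # b is a vector here, so add a trailing dim to make np.matmul treat it as a column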
Ax = np.matmul(A.cpu(), x.unsqueeze(-1).cpu())
Ax.squeeze_(-1)
else:
Ax = np.matmul(A.cpu(), x.cpu())
self.assertEqual(b.expand_as(Ax), Ax)
# Check against NumPy
expected = np.linalg.solve(A.cpu().numpy(), b.expand_as(x).cpu().numpy())
self.assertEqual(x, expected)
batches = [(), (0, ), (3, ), (2, 3)]
ns = [0, 5, 32]
nrhs = [(), (1, ), (5, )]
for n, batch, rhs in itertools.product(ns, batches, nrhs):
run_test(n, batch, rhs)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_solve_batched_broadcasting(self, device, dtype):
from numpy.linalg import solve
def run_test(A_dims, B_dims):
A_matrix_size = A_dims[-1]
A_batch_dims = A_dims[:-2]
B, A = self.solve_test_helper(A_batch_dims + (A_matrix_size, A_matrix_size), B_dims, device, dtype)
actual = torch.linalg.solve(A, B)
expected = solve(A.cpu().numpy(), B.cpu().numpy())
self.assertEqual(actual, expected)
# test against numpy.linalg.solve
run_test((5, 5), (2, 0, 5, 3)) # broadcasting with 0 batch dim
run_test((2, 0, 5, 5), (5, 3)) # broadcasting with 0 batch dim
run_test((2, 1, 3, 4, 4), (4, 6)) # broadcasting B
run_test((4, 4), (2, 1, 3, 4, 2)) # broadcasting A
run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)) # broadcasting A & B
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
def test_tensorsolve(self, device, dtype):
def run_test(a_shape, dims):
a = torch.randn(a_shape, dtype=dtype, device=device)
b = torch.randn(a_shape[:2], dtype=dtype, device=device)
result = torch.linalg.tensorsolve(a, b, dims=dims)
expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
self.assertEqual(result, expected)
# check the out= variant
out = torch.empty_like(result)
ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
dims = [None, (0, 2)]
for a_shape, d in itertools.product(a_shapes, dims):
run_test(a_shape, d)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_tensorsolve_empty(self, device, dtype):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
b = torch.empty(a.shape[:2], dtype=dtype, device=device)
x = torch.linalg.tensorsolve(a, b)
self.assertEqual(torch.tensordot(a, x, dims=len(x.shape)), b)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32)
def test_tensorsolve_errors_and_warnings(self, device, dtype):
        # tensorsolve expects an input that can be reshaped into a square matrix
a = torch.eye(2 * 3 * 4, dtype=dtype, device=device).reshape((2 * 3, 4, 2, 3, 4))
b = torch.randn(8, 4, dtype=dtype, device=device)
self.assertTrue(np.prod(a.shape[2:]) != np.prod(b.shape))
with self.assertRaisesRegex(RuntimeError, r'Expected self to satisfy the requirement'):
torch.linalg.tensorsolve(a, b)
# if non-empty out tensor with wrong shape is passed a warning is given
out = torch.empty_like(a)
b = torch.randn(6, 4, dtype=dtype, device=device)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.tensorsolve(a, b, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out = torch.empty_like(a).to(torch.int)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.linalg.tensorsolve(a, b, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.tensorsolve(a, b, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})
def test_tensorinv(self, device, dtype):
def run_test(a_shape, ind):
a = torch.randn(a_shape, dtype=dtype, device=device)
a_numpy = a.cpu().numpy()
result = torch.linalg.tensorinv(a, ind=ind)
expected = np.linalg.tensorinv(a_numpy, ind=ind)
self.assertEqual(result, expected)
# check the out= variant
out = torch.empty_like(result)
ans = torch.linalg.tensorinv(a, ind=ind, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
# compare to NumPy output
run_test((12, 3, 4), ind=1)
run_test((3, 8, 24), ind=2)
run_test((18, 3, 3, 2), ind=1)
run_test((1, 4, 2, 2), ind=2)
run_test((2, 3, 5, 30), ind=3)
run_test((24, 2, 2, 3, 2), ind=1)
run_test((3, 4, 2, 3, 2), ind=2)
run_test((1, 2, 3, 2, 3), ind=3)
run_test((3, 2, 1, 2, 12), ind=4)
@skipMeta # See https://github.com/pytorch/pytorch/issues/53739
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_tensorinv_empty(self, device, dtype):
for ind in range(1, 4):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
a_inv = torch.linalg.tensorinv(a, ind=ind)
self.assertEqual(a_inv.shape, a.shape[ind:] + a.shape[:ind])
@skipMeta # See https://github.com/pytorch/pytorch/issues/53739
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_tensorinv_errors_and_warnings(self, device, dtype):
def check_shape(a_shape, ind):
# tensorinv requires the input to satisfy
# prod(a.shape[ind:]) == prod(a.shape[:ind])
a = torch.randn(a_shape, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "Expected self to satisfy the requirement"):
torch.linalg.tensorinv(a, ind=ind)
def check_ind(a_shape, ind):
a = torch.randn(a_shape, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "Expected a strictly positive integer"):
torch.linalg.tensorinv(a, ind=ind)
def check_out(a_shape, ind):
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.randn(a_shape, dtype=dtype, device=device)
out = torch.empty_like(a)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.tensorinv(a, ind=ind, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out = torch.empty(0, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.linalg.tensorinv(a, ind=ind, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.tensorinv(a, ind=ind, out=out)
# test for invalid shape
check_shape((2, 3, 4), ind=1)
check_shape((1, 2, 3, 4), ind=3)
# test for invalid ind
check_ind((12, 3, 4), ind=-1)
check_ind((18, 3, 3, 2), ind=0)
# test for invalid out tensor
check_out((12, 3, 4), ind=1)
check_out((3, 8, 24), ind=2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_tensorinv_singular_input(self, device, dtype):
def check_singular_input(a_shape, ind):
prod_ind_end = np.prod(a_shape[ind:])
a = torch.eye(prod_ind_end, dtype=dtype, device=device)
a[-1, -1] = 0 # Now `a` is singular
a = a.reshape(a_shape)
with self.assertRaisesRegex(torch.linalg.LinAlgError, "The diagonal element"):
torch.linalg.tensorinv(a, ind=ind)
# test for non-invertible input
check_singular_input((12, 3, 4), ind=1)
check_singular_input((3, 6, 18), ind=2)
def _test_dot_vdot_vs_numpy(self, device, dtype, torch_fn, np_fn):
def check(x, y):
# Compare with numpy
res = torch_fn(x, y)
if x.dtype == torch.bfloat16:
ref = torch.from_numpy(np.array(np_fn(x.cpu().float().numpy(), y.cpu().float().numpy())))
else:
ref = torch.from_numpy(np.array(np_fn(x.cpu().numpy(), y.cpu().numpy())))
if res.dtype == torch.bfloat16:
self.assertEqual(res.cpu(), ref.bfloat16())
else:
self.assertEqual(res.cpu(), ref)
# Test out variant
out = torch.empty_like(res)
torch_fn(x, y, out=out)
self.assertEqual(out, res)
# Empty
x = torch.tensor([], dtype=dtype, device=device)
y = torch.tensor([], dtype=dtype, device=device)
check(x, y)
# Contiguous
x = 0.1 * torch.randn(5000, dtype=dtype, device=device)
y = 0.1 * torch.randn(5000, dtype=dtype, device=device)
check(x, y)
# 0 strided
y = 0.1 * torch.randn(1, dtype=dtype, device=device).expand(5000)
check(x, y)
# 2 strided
check(x[::2], y[::2])
@dtypes(torch.float, torch.cfloat, torch.bfloat16)
@dtypesIfCUDA(torch.float, torch.cfloat)
@precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5, torch.bfloat16: 1e-0})
def test_dot_vs_numpy(self, device, dtype):
self._test_dot_vdot_vs_numpy(device, dtype, torch.dot, np.dot)
@dtypes(torch.float, torch.cfloat)
@precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5})
def test_vdot_vs_numpy(self, device, dtype):
self._test_dot_vdot_vs_numpy(device, dtype, torch.vdot, np.vdot)
def _test_dot_vdot_invalid_args(self, device, torch_fn, complex_dtypes=False):
def check(x, y, regex):
with self.assertRaisesRegex(RuntimeError, regex):
torch_fn(x, y)
if complex_dtypes:
x = torch.randn(1, dtype=torch.cfloat, device=device)
y = torch.randn(3, dtype=torch.cdouble, device=device)
else:
x = torch.randn(1, dtype=torch.float, device=device)
y = torch.randn(3, dtype=torch.double, device=device)
check(x, y, 'dot : expected both vectors to have same dtype')
check(x.reshape(1, 1), y, '1D tensors expected')
check(x.expand(9), y.to(x.dtype), 'inconsistent tensor size')
if self.device_type != 'cpu':
x_cpu = x.expand(3).cpu()
check(x_cpu, y.to(x.dtype), 'Expected all tensors to be on the same device')
@onlyNativeDeviceTypes
def test_vdot_invalid_args(self, device):
self._test_dot_vdot_invalid_args(device, torch.vdot)
self._test_dot_vdot_invalid_args(device, torch.vdot, complex_dtypes=True)
@onlyNativeDeviceTypes
def test_dot_invalid_args(self, device):
self._test_dot_vdot_invalid_args(device, torch.dot)
self._test_dot_vdot_invalid_args(device, torch.dot, complex_dtypes=True)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank(self, device, dtype):
matrix_rank = torch.linalg.matrix_rank
def run_test(shape0, shape1, batch):
a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
rank_a = matrix_rank(a)
self.assertEqual(rank_a, matrix_rank(a.mH))
aaH = torch.matmul(a, a.mH)
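            # a @ a.mH is Hermitian, so the hermitian=True fast path must agree with the default path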
rank_aaH = matrix_rank(aaH)
rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
self.assertEqual(rank_aaH, rank_aaH_hermitian)
aHa = torch.matmul(a.mH, a)
self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
# check against NumPy
self.assertEqual(rank_a, np.linalg.matrix_rank(a.cpu().numpy()))
self.assertEqual(matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
self.assertEqual(rank_aaH, np.linalg.matrix_rank(aaH.cpu().numpy()))
self.assertEqual(matrix_rank(aaH, 0.01), np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01))
# hermitian flag for NumPy was added in 1.14.0
if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
self.assertEqual(rank_aaH_hermitian,
np.linalg.matrix_rank(aaH.cpu().numpy(), hermitian=True))
self.assertEqual(matrix_rank(aaH, 0.01, True),
np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01, True))
# check out= variant
out = torch.empty(a.shape[:-2], dtype=torch.int64, device=device)
ans = matrix_rank(a, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, rank_a)
shapes = (3, 13)
batches = ((), (0, ), (4, ), (3, 5, ))
for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
run_test(shape0, shape1, batch)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank_atol(self, device, dtype):
def run_test_atol(shape0, shape1, batch):
a = make_tensor((*batch, shape0, shape1), dtype=dtype, device=device)
# Check against NumPy output
# Test float tol, and specific value for each matrix
tolerances = [float(torch.rand(1)), ]
# Test different types of tol tensor
for tol_type in all_types():
tolerances.append(make_tensor(a.shape[:-2], dtype=tol_type, device=device, low=0))
# Test broadcasting of tol
if a.ndim > 2:
tolerances.append(make_tensor(a.shape[-3], dtype=torch.float32, device=device, low=0))
for tol in tolerances:
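                # the legacy `tol` argument should behave exactly like `atol`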
actual = torch.linalg.matrix_rank(a, atol=tol)
actual_tol = torch.linalg.matrix_rank(a, tol=tol)
self.assertEqual(actual, actual_tol)
numpy_tol = tol if isinstance(tol, float) else tol.cpu().numpy()
expected = np.linalg.matrix_rank(a.cpu().numpy(), tol=numpy_tol)
self.assertEqual(actual, expected)
shapes = (3, 13)
batches = ((), (0, ), (4, ), (3, 5, ))
for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
run_test_atol(shape0, shape1, batch)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64)
def test_matrix_rank_atol_rtol(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, device=device, dtype=dtype)
        # creates a full-rank matrix (rank = n) with singular values in the range [2/3, 3/2]
# the singular values are 1 + 1/2, 1 - 1/3, 1 + 1/4, 1 - 1/5, ...
n = 9
a = make_arg(n, n)
# test float and tensor variants
for tol_value in [0.81, torch.tensor(0.81, device=device)]:
# using rtol (relative tolerance) takes into account the largest singular value (1.5 in this case)
result = torch.linalg.matrix_rank(a, rtol=tol_value)
self.assertEqual(result, 2) # there are 2 singular values above 1.5*0.81 = 1.215
# atol is used directly to compare with singular values
result = torch.linalg.matrix_rank(a, atol=tol_value)
self.assertEqual(result, 7) # there are 7 singular values above 0.81
# when both are specified the maximum tolerance is used
result = torch.linalg.matrix_rank(a, atol=tol_value, rtol=tol_value)
self.assertEqual(result, 2) # there are 2 singular values above max(0.81, 1.5*0.81)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@skipCUDAVersionIn([(11, 6), (11, 7)]) # https://github.com/pytorch/pytorch/issues/75391
@dtypes(*floating_and_complex_types())
def test_matrix_rank_empty(self, device, dtype):
matrix_rank = torch.linalg.matrix_rank
# NumPy doesn't work for input with no elements
def run_test(shape0, shape1, batch):
a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
rank_a = matrix_rank(a)
expected = torch.zeros(batch, dtype=torch.int64, device=device)
self.assertEqual(rank_a, matrix_rank(a.mH))
aaH = torch.matmul(a, a.mH)
rank_aaH = matrix_rank(aaH)
rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
self.assertEqual(rank_aaH, rank_aaH_hermitian)
aHa = torch.matmul(a.mH, a)
self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
self.assertEqual(rank_a, expected)
self.assertEqual(matrix_rank(a, 0.01), expected)
self.assertEqual(rank_aaH, expected)
self.assertEqual(matrix_rank(aaH, 0.01), expected)
self.assertEqual(rank_aaH_hermitian, expected)
self.assertEqual(matrix_rank(aaH, 0.01, True), expected)
batches = ((), (4, ), (3, 5, ))
for batch in batches:
run_test(0, 0, batch)
run_test(0, 3, batch)
run_test(3, 0, batch)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank_out_errors_and_warnings(self, device, dtype):
# dtypes should be safely castable
a = torch.eye(2, dtype=dtype, device=device)
out = torch.empty(0, dtype=torch.bool, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Bool"):
torch.linalg.matrix_rank(a, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.matrix_rank(a, out=out)
# if out tensor with wrong shape is passed a warning is given
with warnings.catch_warnings(record=True) as w:
out = torch.empty(3, dtype=dtype, device=device)
# Trigger warning
torch.linalg.matrix_rank(a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank_basic(self, device, dtype):
matrix_rank = torch.linalg.matrix_rank
a = torch.eye(10, dtype=dtype, device=device)
self.assertEqual(matrix_rank(a).item(), 10)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 10)
a[5, 5] = 0
self.assertEqual(matrix_rank(a).item(), 9)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 9)
@onlyNativeDeviceTypes
@dtypes(torch.double)
    # This tests only the cases where torch.chain_matmul differs from torch.linalg.multi_dot, which it is an "alias" for.
def test_chain_matmul(self, device, dtype):
# chain_matmul accepts a single input tensor while multi_dot does not
t = make_tensor((2, 2), dtype=dtype, device=device)
self.assertEqual(t, torch.chain_matmul(t))
with self.assertRaisesRegex(RuntimeError, r"chain_matmul\(\): Expected one or more matrices"):
torch.chain_matmul()
# chain_matmul expects all tensors to be 2D whereas multi_dot allows the first and last tensors to
# be either 1D or 2D
with self.assertRaisesRegex(RuntimeError, r"Tensor dimension is 1, expected 2 instead"):
torch.chain_matmul(make_tensor(1, dtype=dtype, device=device), make_tensor(1, dtype=dtype, device=device))
@onlyNativeDeviceTypes
@dtypes(torch.double, torch.cdouble)
def test_multi_dot(self, device, dtype):
def check(*shapes):
tensors = [make_tensor(shape, dtype=dtype, device=device) for shape in shapes]
np_arrays = [tensor.cpu().numpy() for tensor in tensors]
res = torch.linalg.multi_dot(tensors).cpu()
ref = torch.from_numpy(np.array(np.linalg.multi_dot(np_arrays)))
self.assertEqual(res, ref)
# test for inputs with empty dimensions
check([0], [0])
check([2], [2, 0])
check([1, 0], [0])
check([0, 2], [2, 1])
check([2, 2], [2, 0])
check([2, 0], [0, 3])
check([0, 0], [0, 1])
check([4, 2], [2, 0], [0, 3], [3, 2])
# test variable output shapes
check([2], [2])
check([1, 2], [2])
check([2], [2, 1])
check([1, 2], [2, 1])
check([3, 2], [2, 4])
# test multiple input tensors
check([3], [3, 4], [4, 2], [2, 5], [5])
check([1, 2], [2, 2], [2, 3], [3, 1])
# test large tensors
check([10, 100], [100, 5], [5, 50])
check([10, 20], [20, 30], [30, 5])
@onlyNativeDeviceTypes
@dtypes(torch.float)
def test_multi_dot_errors(self, device, dtype):
def check(tensors, out, msg):
with self.assertRaisesRegex(RuntimeError, msg):
torch.linalg.multi_dot(tensors, out=out)
a = make_tensor(2, dtype=dtype, device=device)
check([], None, "expected at least 2 tensors")
check([a], None, "expected at least 2 tensors")
check([torch.tensor(1, device=device, dtype=dtype), a], None, "the first tensor must be 1D or 2D")
check([a, torch.tensor(1, device=device, dtype=dtype)], None, "the last tensor must be 1D or 2D")
check([a, a, a], None, "tensor 1 must be 2D")
check([a, make_tensor((2, 2, 2), dtype=dtype, device=device), a], None, "tensor 1 must be 2D")
check([a, make_tensor(2, dtype=torch.double, device=device)], None, "all tensors must have be the same dtype")
check([a, a], torch.empty(0, device=device, dtype=torch.double), "expected out tensor to have dtype")
if self.device_type == 'cuda':
check([a, make_tensor(2, dtype=dtype, device="cpu")], None, "all tensors must be on the same device")
check([a, a], torch.empty(0, dtype=dtype), "expected out tensor to be on device")
check([a, make_tensor(3, dtype=dtype, device=device)], None, "cannot be multiplied")
check([a, make_tensor((3, 2), dtype=dtype, device=device), a], None, "cannot be multiplied")
@precisionOverride({torch.float32: 5e-6, torch.complex64: 5e-6})
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_qr(self, device, dtype):
def run_test(tensor_dims, some):
A = torch.randn(*tensor_dims, dtype=dtype, device=device)
Q, R = torch.qr(A, some=some)
# Check0: Q[-2:] = (m, n_columns), R[-2:] = (n_columns, n)
m, n = tensor_dims[-2:]
n_columns = m if (not some) and m > n else min(m, n)
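            # with some=False (complete QR) Q has m columns, otherwise (reduced QR) it has min(m, n)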
self.assertEqual(Q.size(-2), m)
self.assertEqual(R.size(-1), n)
self.assertEqual(Q.size(-1), n_columns)
A_ = A.cpu().numpy()
Q_ = Q.cpu().numpy()
R_ = R.cpu().numpy()
# Check1: A = QR
self.assertEqual(A_, np.matmul(Q_, R_))
# Check2: A = QR (with out)
Q_out, R_out = torch.full_like(Q, math.nan), torch.full_like(R, math.nan)
torch.qr(A, some=some, out=(Q_out, R_out))
Q_out_ = Q_out.cpu().numpy()
R_out_ = R_out.cpu().numpy()
self.assertEqual(A_, np.matmul(Q_out_, R_out_))
# Check3: Q == Q_out, R == R_out
self.assertEqual(Q_, Q_out_)
self.assertEqual(R_, R_out_)
# Check4: Q^{T}Q = I, triu(R) = R
eye = torch.eye(n_columns, device=device, dtype=dtype).expand(Q.shape[:-2] + (n_columns, n_columns)).cpu().numpy()
self.assertEqual(np.matmul(Q_.swapaxes(-1, -2).conj(), Q_), eye)
self.assertEqual(R.triu(), R)
tensor_dims_list = [(0, 5), (0, 0), (5, 0), # Empty Tensors
(2, 1, 0, 5), (2, 1, 0, 0), (2, 1, 5, 0), (2, 0, 5, 5), # Batched empty Tensors
(3, 5), (5, 5), (5, 3), # Single matrix
(7, 3, 5), (7, 5, 5), (7, 5, 3), # 3-dim Tensors
(7, 5, 3, 5), (7, 5, 5, 5), (7, 5, 5, 3)] # 4-dim Tensors
for tensor_dims, some in itertools.product(tensor_dims_list, [True, False]):
run_test(tensor_dims, some)
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_qr_vs_numpy(self, device, dtype):
"""
test torch.linalg.qr vs numpy.linalg.qr
"""
sizes_to_test = [
(7, 5),
(5, 7),
(5, 0), # empty
(0, 5), # empty
]
for size in sizes_to_test:
t = torch.randn(size, device=device, dtype=dtype)
np_t = t.cpu().numpy()
for mode in ['reduced', 'complete']:
exp_q, exp_r = np.linalg.qr(np_t, mode=mode)
q, r = torch.linalg.qr(t, mode=mode)
self.assertEqual(q, exp_q)
self.assertEqual(r, exp_r)
#
            # for mode='r' we need special logic because numpy returns only r
exp_r = np.linalg.qr(np_t, mode='r')
q, r = torch.linalg.qr(t, mode='r')
# check that q is empty
self.assertEqual(q.shape, (0,))
self.assertEqual(q.dtype, t.dtype)
self.assertEqual(q.device, t.device)
# check r
self.assertEqual(r, exp_r)
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_linalg_qr_autograd_errors(self, device, dtype):
# torch.linalg.qr(mode='r') returns only 'r' and discards 'q', but
# without 'q' you cannot compute the backward pass. Check that
# linalg_qr_backward complains cleanly in that case.
inp = torch.randn((5, 7), device=device, dtype=dtype, requires_grad=True)
q, r = torch.linalg.qr(inp, mode='r')
self.assertEqual(q.shape, (0,)) # empty tensor
b = torch.sum(r)
with self.assertRaisesRegex(RuntimeError,
"The derivative of linalg.qr depends on Q"):
b.backward()
#
inp = torch.randn((7, 5), device=device, dtype=dtype, requires_grad=True)
q, r = torch.linalg.qr(inp, mode='complete')
b = torch.sum(r)
with self.assertRaisesRegex(RuntimeError,
"The QR decomposition is not differentiable when mode='complete' and nrows > ncols"):
b.backward()
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_qr_batched(self, device, dtype):
"""
test torch.linalg.qr vs numpy.linalg.qr. We need some special logic
because numpy does not support batched qr
"""
def np_qr_batched(a, mode):
"""poor's man batched version of np.linalg.qr"""
all_q = []
all_r = []
for matrix in a:
result = np.linalg.qr(matrix, mode=mode)
if mode == 'r':
all_r.append(result)
else:
q, r = result
all_q.append(q)
all_r.append(r)
if mode == 'r':
return np.array(all_r)
else:
return np.array(all_q), np.array(all_r)
t = torch.randn((3, 7, 5), device=device, dtype=dtype)
np_t = t.cpu().numpy()
for mode in ['reduced', 'complete']:
exp_q, exp_r = np_qr_batched(np_t, mode=mode)
q, r = torch.linalg.qr(t, mode=mode)
self.assertEqual(q, exp_q)
self.assertEqual(r, exp_r)
        # for mode='r' we need special logic because numpy returns only r
exp_r = np_qr_batched(np_t, mode='r')
q, r = torch.linalg.qr(t, mode='r')
# check that q is empty
self.assertEqual(q.shape, (0,))
self.assertEqual(q.dtype, t.dtype)
self.assertEqual(q.device, t.device)
# check r
self.assertEqual(r, exp_r)
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_qr_error_cases(self, device, dtype):
t1 = torch.randn(5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, 'linalg.qr: The input tensor A must have at least 2 dimensions.'):
torch.linalg.qr(t1)
t2 = torch.randn((5, 7), device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "qr received unrecognized mode 'hello'"):
torch.linalg.qr(t2, mode='hello')
def _check_einsum(self, *args, np_args=None):
if np_args is None:
np_args = [arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg for arg in args]
ref = np.einsum(*np_args)
res = torch.einsum(*args)
self.assertEqual(ref, res)
# Check that the other variations for opt_einsum work too
if TEST_OPT_EINSUM:
with opt_einsum.flags(enabled=False):
res = torch.einsum(*args)
self.assertEqual(ref, res)
with opt_einsum.flags(enabled=True, strategy='greedy'):
res = torch.einsum(*args)
self.assertEqual(ref, res)
with opt_einsum.flags(enabled=True, strategy='optimal'):
res = torch.einsum(*args)
self.assertEqual(ref, res)
@dtypes(torch.double, torch.cdouble)
def test_einsum(self, device, dtype):
# Test cases from https://gist.github.com/rockt/15ee013889d65342088e9260a377dc8f
x = make_tensor((5,), dtype=dtype, device=device)
y = make_tensor((7,), dtype=dtype, device=device)
A = make_tensor((3, 5), dtype=dtype, device=device)
B = make_tensor((2, 5), dtype=dtype, device=device)
C = make_tensor((2, 3, 5), dtype=dtype, device=device)
D = make_tensor((2, 5, 7), dtype=dtype, device=device)
E = make_tensor((7, 9), dtype=dtype, device=device)
F = make_tensor((2, 3, 3, 5), dtype=dtype, device=device)
G = make_tensor((5, 4, 6), dtype=dtype, device=device)
H = make_tensor((4, 4), dtype=dtype, device=device)
I = make_tensor((2, 3, 2), dtype=dtype, device=device)
# Vector operations
self._check_einsum('i->', x) # sum
self._check_einsum('i,i->', x, x) # dot
        self._check_einsum('i,i->i', x, x) # vector element-wise mul
self._check_einsum('i,j->ij', x, y) # outer
# Matrix operations
self._check_einsum("ij->ji", A) # transpose
self._check_einsum("ij->j", A) # row sum
self._check_einsum("ij->i", A) # col sum
self._check_einsum("ij,ij->ij", A, A) # matrix element-wise mul
self._check_einsum("ij,j->i", A, x) # matrix vector multiplication
self._check_einsum("ij,kj->ik", A, B) # matmul
self._check_einsum("ij,ab->ijab", A, E) # matrix outer product
# Tensor operations
self._check_einsum("Aij,Ajk->Aik", C, D) # batch matmul
self._check_einsum("ijk,jk->i", C, A) # tensor matrix contraction
self._check_einsum("aij,jk->aik", D, E) # tensor matrix contraction
self._check_einsum("abCd,dFg->abCFg", F, G) # tensor tensor contraction
self._check_einsum("ijk,jk->ik", C, A) # tensor matrix contraction with double indices
self._check_einsum("ijk,jk->ij", C, A) # tensor matrix contraction with double indices
self._check_einsum("ijk,ik->j", C, B) # non contiguous
self._check_einsum("ijk,ik->jk", C, B) # non contiguous with double indices
# Test diagonals
self._check_einsum("ii", H) # trace
self._check_einsum("ii->i", H) # diagonal
self._check_einsum('iji->j', I) # non-contiguous trace
self._check_einsum('ngrg...->nrg...', make_tensor((2, 1, 3, 1, 4), dtype=dtype, device=device))
# Test ellipsis
self._check_einsum("i...->...", H)
self._check_einsum("ki,...k->i...", A.t(), B)
self._check_einsum("k...,jk->...", A.t(), B)
self._check_einsum('...ik, ...j -> ...ij', C, x)
self._check_einsum('Bik,k...j->i...j', C, make_tensor((5, 3), dtype=dtype, device=device))
self._check_einsum('i...j, ij... -> ...ij', C, make_tensor((2, 5, 2, 3), dtype=dtype, device=device))
# torch.bilinear with noncontiguous tensors
l = make_tensor((5, 10), dtype=dtype, device=device, noncontiguous=True)
r = make_tensor((5, 20), dtype=dtype, device=device, noncontiguous=True)
w = make_tensor((15, 10, 20), dtype=dtype, device=device)
self._check_einsum("bn,anm,bm->ba", l, w, r)
# with strided tensors
self._check_einsum("bn,Anm,bm->bA", l[:, ::2], w[:, ::2, ::2], r[:, ::2])
# test multiple inputs
self._check_einsum("...,be,b...,beg,gi,bc...->bi...", A, B, C, D, E, F)
@dtypes(torch.double, torch.cdouble)
def test_einsum_sublist_format(self, device, dtype):
x = make_tensor((5,), dtype=dtype, device=device)
y = make_tensor((7,), dtype=dtype, device=device)
A = make_tensor((3, 5), dtype=dtype, device=device)
B = make_tensor((2, 5), dtype=dtype, device=device)
C = make_tensor((2, 1, 3, 1, 4), dtype=dtype, device=device)
self._check_einsum(x, [0])
self._check_einsum(x, [0], [])
self._check_einsum(x, [0], y, [1], [0, 1])
self._check_einsum(A, [0, 1], [1, 0])
self._check_einsum(A, [0, 1], x, [1], [0])
self._check_einsum(A, [0, 1], B, [2, 1])
self._check_einsum(A, [0, 1], B, [2, 1], [0, 2])
self._check_einsum(C, [0, 1, 2, 1, Ellipsis], [0, 2, 1, Ellipsis])
self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0])
self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0], [1, Ellipsis])
self._check_einsum(A.t(), [0, Ellipsis], B, [1, 0], [Ellipsis])
# torch.bilinear with noncontiguous tensors
l = make_tensor((5, 10), dtype=dtype, device=device, noncontiguous=True)
r = make_tensor((5, 20), dtype=dtype, device=device, noncontiguous=True)
w = make_tensor((15, 10, 20), dtype=dtype, device=device)
self._check_einsum(l, [40, 41], w, [2, 41, 50], r, [40, 50], [40, 2])
@dtypes(torch.double, torch.cdouble)
def test_einsum_random(self, device, dtype):
def convert_label(label):
if label == ...:
return '...'
elif label < 26:
return chr(ord('A') + label)
else:
return chr(ord('a') + label - 26)
def convert_sublist(sublist):
return ''.join(convert_label(label) for label in sublist)
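        # The sublist format uses integer labels rather than letters; convert_sublist
        # maps them onto the equivalent equation string (0-25 -> 'A'-'Z', 26-51 -> 'a'-'z')
        # so that each random case can be exercised in both formats.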
def test(n=10, # how many tests to generate
n_labels=5, # how many labels available
min_ops=1, max_ops=4, # min and max number of operands per test
min_dims=1, max_dims=3, # min and max number of dimensions per operand
min_size=1, max_size=8, # min and max size of each dimension
max_out_dim=3, # max number of dimensions for the output
enable_diagonals=True, # controls if labels can be repeated for diagonals
ellipsis_prob=0.5, # probability of including ellipsis in operand
broadcasting_prob=0.1): # probability of turning some dim sizes 1 for broadcasting
all_labels = torch.arange(52)
assert 0 <= n
assert 0 <= n_labels < len(all_labels)
assert 0 < min_ops <= max_ops
assert 0 <= min_dims <= max_dims
assert 0 <= min_size <= max_size
assert 0 <= max_out_dim
assert enable_diagonals or max_dims <= n_labels
for _ in range(n):
# Select a subset of labels for this test and give them random sizes
possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))
operands = []
sublists = []
ell_size = 0
valid_labels = set()
# create random input operands
for _ in range(random.randint(min_ops, max_ops)):
n_dim = random.randint(min_dims, max_dims)
labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
labels = possible_labels[labels_idx]
valid_labels.update(labels.tolist())
shape = labels_size[labels]
# turn some dimensions to size 1 for testing broadcasting
mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
broadcast_labels = torch.unique(labels[mask == 1])
shape[(labels[..., None] == broadcast_labels).any(-1)] = 1
labels = labels.tolist()
shape = shape.tolist()
# include ellipsis if not all dimensions were assigned a label already
if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
ell_num_dim = random.randint(1, max_dims - n_dim)
ell_size = max(ell_size, ell_num_dim)
ell_shape = ellipsis_shape[-ell_num_dim:]
# again, turn some dimensions to size 1 for broadcasting
mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
ell_shape[mask == 1] = 1
ell_index = random.randint(0, n_dim)
shape[ell_index:ell_index] = ell_shape
labels.insert(ell_index, ...)
operands.append(make_tensor(shape, dtype=dtype, device=device))
sublists.append(labels)
# NumPy has a bug with the sublist format so for now we compare PyTorch sublist
# implementation against the equation format implementation of NumPy
# see https://github.com/numpy/numpy/issues/10926
np_operands = [op.cpu().numpy() for op in operands]
# test equation format
equation = ','.join(convert_sublist(l) for l in sublists)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format
args = [*itertools.chain(*zip(operands, sublists))]
self._check_einsum(*args, np_args=(equation, *np_operands))
# generate an explicit output
out_sublist = []
num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
if num_out_labels > 0:
out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
out_sublist.insert(random.randint(0, num_out_labels), ...)
# test equation format with explicit output
equation += '->' + convert_sublist(out_sublist)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format with explicit output
args.append(out_sublist)
self._check_einsum(*args, np_args=(equation, *np_operands))
test(500)
def test_einsum_corner_cases(self, device):
def check(equation, *operands, expected_output):
tensors = [torch.tensor(operand, device=device, dtype=torch.float32) if not isinstance(operand, tuple)
else make_tensor(operand, dtype=torch.float32, device=device) for operand in operands]
output = torch.einsum(equation, tensors)
self.assertEqual(output, torch.tensor(expected_output, dtype=torch.float32, device=device))
        # Test equation variations
check(' ', 1, expected_output=1)
check(' -> ', 1, expected_output=1)
check(' , ', 2, 2, expected_output=4)
check(' , , ', 2, 2, 2, expected_output=8)
check(' , -> ', 2, 2, expected_output=4)
check(' i ', [1], expected_output=[1])
check(' i -> ', [1], expected_output=1)
check(' i -> i ', [1], expected_output=[1])
check(' i , i ', [2], [2], expected_output=4)
check(' i , i -> i ', [2], [2], expected_output=[4])
# Test tensors with 0 size dimensions
check('i', [], expected_output=[])
check(' i j -> j', [[], []], expected_output=[])
check('ij->i', [[], []], expected_output=[0., 0.])
check(' i j k , k -> i j ', (3, 0, 6), (6,), expected_output=[[], [], []])
# Test broadcasting
check('i,j', [2], [1, 2], expected_output=[[2, 4]])
check('i,ij->ij', [1, 2], [[1, 2, 3], [2, 3, 4]], expected_output=[[1, 2, 3], [4, 6, 8]])
# Test ellipsis broadcasting
check('...', 1, expected_output=1)
check('...->', 1, expected_output=1)
check('...->...', 1, expected_output=1)
check('...', [1], expected_output=[1])
check('...->', [1], expected_output=1)
check('z...->z', [1], expected_output=[1])
check('Z...->...Z', [1], expected_output=[1])
check('...a->', [[2], [4]], expected_output=6)
check('a...b->ab', [[[1], [2]], [[3], [4]]], expected_output=[[3], [7]])
def test_einsum_error_cases(self, device):
def check(*args, regex, exception=RuntimeError):
with self.assertRaisesRegex(exception, r'einsum\(\):.*' + regex):
torch.einsum(*args)
x = make_tensor((2,), dtype=torch.float32, device=device)
y = make_tensor((2, 3), dtype=torch.float32, device=device)
check('', [], regex=r'at least one operand', exception=ValueError)
check('. ..', [x], regex=r'found \'.\' for operand 0 that is not part of any ellipsis')
check('... ...', [x], regex=r'found \'.\' for operand 0 for which an ellipsis was already found')
check('1', [x], regex=r'invalid subscript given at index 0')
check(',', [x], regex=r'fewer operands were provided than specified in the equation')
check('', [x, x], regex=r'more operands were provided than specified in the equation')
check('', [x], regex=r'the number of subscripts in the equation \(0\) does not match the number '
r'of dimensions \(1\) for operand 0 and no ellipsis was given')
check('ai', [x], regex=r'the number of subscripts in the equation \(2\) does not match the number '
r'of dimensions \(1\) for operand 0 and no ellipsis was given')
check('ai...', [x], regex=r'the number of subscripts in the equation \(2\) is more than the number '
r'of dimensions \(1\) for operand 0')
check('a->... .', [x], regex=r'found \'.\' for output but an ellipsis \(...\) was already found')
check('a->..', [x], regex=r'found \'.\' for output that is not part of any ellipsis \(...\)')
check('a->1', [x], regex=r'invalid subscript given at index 3')
check('a->aa', [x], regex=r'output subscript a appears more than once in the output')
check('a->i', [x], regex=r'output subscript i does not appear in the equation for any input operand')
check('aa', [y], regex=r'subscript a is repeated for operand 0 but the sizes don\'t match, 3 != 2')
check('...,...', [x, y], regex=r'does not broadcast')
check('a,a', [x, make_tensor((3,), dtype=torch.float32, device=device)], regex=r'does not broadcast')
check('a, ba', [x, y], regex=r'subscript a has size 3 for operand 1 which does not broadcast with previously'
r' seen size 2')
check(x, [-1], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
check(x, [52], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
def _gen_shape_inputs_linalg_triangular_solve(self, shape, dtype, device, well_conditioned=False):
make_arg = partial(make_tensor, dtype=dtype, device=device)
make_randn = partial(torch.randn, dtype=dtype, device=device)
b, n, k = shape
for left, uni, expand_a, tr_a, conj_a, expand_b, tr_b, conj_b in product((True, False), repeat=8):
# expand means that we generate a batch of matrices with a stride of zero in the batch dimension
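            # tr_* transposes the operand, conj_* conjugates it (complex dtypes only),
            # uni requests a unit-diagonal (unitriangular) A, and left selects whether
            # we solve A X = B or X A = B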
if (conj_a or conj_b) and not dtype.is_complex:
continue
            # We only expand along the batch dimension, so there is nothing to expand when b == 1
if (expand_a or expand_b) and b == 1:
continue
size_a = (b, n, n) if left else (b, k, k)
size_b = (b, n, k) if not tr_b else (b, k, n)
# If expand_a or expand_b, we'll expand them to the correct size later
if b == 1 or expand_a:
size_a = size_a[1:]
if b == 1 or expand_b:
size_b = size_b[1:]
if well_conditioned:
PLU = torch.linalg.lu(make_randn(*size_a))
if uni:
# A = L from PLU
A = PLU[1].transpose(-2, -1).contiguous()
else:
# A = U from PLU
A = PLU[2].contiguous()
else:
A = make_arg(size_a)
A.triu_()
diag = A.diagonal(0, -2, -1)
if uni:
diag.fill_(1.)
else:
diag[diag.abs() < 1e-6] = 1.
B = make_arg(size_b)
if tr_a:
A.transpose_(-2, -1)
if tr_b:
B.transpose_(-2, -1)
if conj_a:
A = A.conj()
if conj_b:
B = B.conj()
if expand_a:
A = A.expand(b, *size_a)
if expand_b:
B = B.expand(b, n, k)
yield A, B, left, not tr_a, uni
def _test_linalg_solve_triangular(self, A, B, upper, left, uni):
X = torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni)
if left:
self.assertEqual(A @ X, B)
else:
self.assertEqual(X @ A, B)
out = B
# B may be expanded
if not B.is_contiguous() and not B.transpose(-2, -1).is_contiguous():
out = B.clone()
torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni, out=out)
self.assertEqual(X, out)
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-1, torch.complex64: 1e-1,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_linalg_solve_triangular(self, device, dtype):
if TEST_WITH_ROCM and dtype is torch.float32:
raise unittest.SkipTest("Skipping for ROCm for Magma backend; unskip when hipSolver backend is enabled")
# This exercises the API + BLAS CPU + batched cuBLAS
ks = (3, 1, 0)
ns = (5, 0)
bs = (1, 2, 0)
gen_inputs = self._gen_shape_inputs_linalg_triangular_solve
for b, n, k in product(bs, ns, ks):
for A, B, left, upper, uni in gen_inputs((b, n, k), dtype, device):
self._test_linalg_solve_triangular(A, B, upper, left, uni)
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Test fails for float64 on GPU (P100, V100) on Meta infra")
@onlyCUDA
@skipCUDAIfNoMagma # Magma needed for the PLU decomposition
@skipCUDAIfRocm # There is a memory access bug in rocBLAS in the (non-batched) solve_triangular
@skipCUDAVersionIn([(11, 3), (11, 6), (11, 7)]) # Tracked in https://github.com/pytorch/pytorch/issues/70111
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_linalg_solve_triangular_large(self, device, dtype):
# Exercises magma and cublas
magma = (9, 513, 1)
iterative_cublas = (2, 64, 1)
gen_inputs = self._gen_shape_inputs_linalg_triangular_solve
for shape in (magma, iterative_cublas):
for A, B, left, upper, uni in gen_inputs(shape, dtype, device, well_conditioned=True):
self._test_linalg_solve_triangular(A, B, upper, left, uni)
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_linalg_solve_triangular_broadcasting(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
sizes = (((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)),
((2, 1, 3, 4, 4), (4, 6)),
((4, 4), (2, 1, 3, 4, 2)),
((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)))
for size_A, size_B in sizes:
for left, upper, uni in itertools.product([True, False], repeat=3):
A = make_arg(size_A)
if upper:
A.triu_()
else:
A.tril_()
diag = A.diagonal(0, -2, -1)
if uni:
diag.fill_(1.)
else:
diag[diag.abs() < 1e-6] = 1.
B = make_arg(size_B)
if not left:
B.transpose_(-2, -1)
X = torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni)
if left:
B_other = A @ X
else:
B_other = X @ A
self.assertEqual(*torch.broadcast_tensors(B, B_other))
def triangular_solve_test_helper(self, A_dims, b_dims, upper, unitriangular,
device, dtype):
triangle_function = torch.triu if upper else torch.tril
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = torch.randn(*A_dims, dtype=dtype, device=device)
# create positive definite matrix
A = torch.matmul(A, A.mT)
A_triangular = triangle_function(A)
if unitriangular:
A_triangular.diagonal(dim1=-2, dim2=-1).fill_(1.)
return b, A_triangular
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_triangular_solve(self, device, dtype):
ks = [0, 1, 3]
ns = [0, 5]
for k, n, (upper, unitriangular, transpose) in itertools.product(ks, ns,
itertools.product([True, False], repeat=3)):
b, A = self.triangular_solve_test_helper((n, n), (n, k), upper,
unitriangular, device, dtype)
x = torch.triangular_solve(b, A, upper=upper, unitriangular=unitriangular, transpose=transpose)[0]
if transpose:
self.assertEqual(b, np.matmul(A.t().cpu(), x.cpu()))
else:
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_triangular_solve_batched(self, device, dtype):
def triangular_solve_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
unitriangular, device, dtype)
x_exp_list = []
for i in range(b_dims[0]):
x_exp_list.append(torch.triangular_solve(b[i], A[i], upper=upper,
unitriangular=unitriangular,
transpose=transpose)[0])
x_exp = torch.stack(x_exp_list) # Stacked output
x_act = torch.triangular_solve(b, A, upper=upper,
unitriangular=unitriangular,
transpose=transpose)[0] # Actual output
self.assertEqual(x_act, x_exp) # Equality check
if transpose:
A = A.mT
Ax = np.matmul(A.cpu(), x_act.cpu())
self.assertEqual(b, Ax)
def triangular_solve_zero_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
unitriangular, device, dtype)
x = torch.triangular_solve(b, A, upper=upper,
unitriangular=unitriangular,
transpose=transpose)[0]
self.assertTrue(x.shape == b.shape)
for upper, unitriangular, transpose in itertools.product([True, False], repeat=3):
batchsize = 3
triangular_solve_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
upper, unitriangular, transpose)
# test empty input
triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 10),
upper, unitriangular, transpose)
triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 0),
upper, unitriangular, transpose)
# test zero batch case
batchsize = 0
triangular_solve_zero_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
upper, unitriangular, transpose)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_triangular_solve_batched_many_batches(self, device, dtype):
for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
# test batched A case
b, A = self.triangular_solve_test_helper((256, 256, 5, 5), (5, 1),
upper, unitriangular, device, dtype)
x, _ = torch.triangular_solve(b, A,
upper=upper, transpose=transpose, unitriangular=unitriangular)
if transpose:
A = A.mT
Ax = torch.matmul(A, x)
rtol = 1e-2 if dtype in [torch.float32, torch.complex64] else self.precision
self.assertEqual(Ax, b.expand_as(Ax), atol=self.precision, rtol=rtol)
# test batched b case
b, A = self.triangular_solve_test_helper((3, 3), (512, 512, 3, 1),
upper, unitriangular, device, dtype)
x, _ = torch.triangular_solve(b, A, upper=upper, transpose=transpose,
unitriangular=unitriangular)
if transpose:
A = A.mT
self.assertEqual(torch.matmul(A, x), b)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(*floating_and_complex_types())
def test_triangular_solve_batched_broadcasting(self, device, dtype):
from scipy.linalg import solve_triangular as tri_solve
def scipy_tri_solve_batched(A, B, upper, trans, diag):
batch_dims_A, batch_dims_B = A.shape[:-2], B.shape[:-2]
single_dim_A, single_dim_B = A.shape[-2:], B.shape[-2:]
expand_dims = tuple(torch._C._infer_size(torch.Size(batch_dims_A),
torch.Size(batch_dims_B)))
expand_A = np.broadcast_to(A, expand_dims + single_dim_A)
expand_B = np.broadcast_to(B, expand_dims + single_dim_B)
flat_A = expand_A.reshape((-1,) + single_dim_A)
flat_B = expand_B.reshape((-1,) + single_dim_B)
flat_X = np.vstack([tri_solve(a, b, lower=(not upper), trans=int(trans), unit_diagonal=diag)
for a, b in zip(flat_A, flat_B)])
return flat_X.reshape(expand_B.shape)
def run_test(A_dims, b_dims, device, upper, transpose, unitriangular):
b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
unitriangular, device, dtype)
x_exp = torch.as_tensor(scipy_tri_solve_batched(A.cpu().numpy(), b.cpu().numpy(),
upper, transpose, unitriangular))
x = torch.triangular_solve(b, A, upper=upper, transpose=transpose, unitriangular=unitriangular)[0]
self.assertEqual(x, x_exp.to(device))
for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
# test against scipy.linalg.solve_triangular
run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), device, upper, transpose, unitriangular) # no broadcasting
run_test((2, 1, 3, 4, 4), (4, 6), device, upper, transpose, unitriangular) # broadcasting b
run_test((4, 4), (2, 1, 3, 4, 2), device, upper, transpose, unitriangular) # broadcasting A
run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), device, upper, transpose, unitriangular) # broadcasting A & b
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_triangular_solve_out_errors_and_warnings(self, device, dtype):
# dtypes should be safely castable
a = torch.eye(2, dtype=dtype, device=device)
b = torch.randn(2, 1, dtype=dtype, device=device)
out = torch.empty_like(b).to(torch.int)
clone_a = torch.empty_like(a)
with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
torch.triangular_solve(b, a, out=(out, clone_a))
out = torch.empty_like(b)
clone_a = clone_a.to(torch.int)
with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
torch.triangular_solve(b, a, out=(out, clone_a))
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
clone_a = torch.empty_like(a)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.triangular_solve(b, a, out=(out, clone_a))
out = torch.empty(0, dtype=dtype, device=device)
clone_a = torch.empty_like(a).to(wrong_device)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.triangular_solve(b, a, out=(out, clone_a))
# Trigger the WARN_ONCE deprecation error
torch.triangular_solve(b, a)
# if out tensor with wrong shape is passed a warning is given
with warnings.catch_warnings(record=True) as w:
out = torch.empty(1, dtype=dtype, device=device)
clone_a = torch.empty(1, dtype=dtype, device=device)
# Trigger warning
torch.triangular_solve(b, a, out=(out, clone_a))
# Check warning occurs
self.assertEqual(len(w), 2)
self.assertTrue("An output with one or more elements was resized" in str(w[0].message))
self.assertTrue("An output with one or more elements was resized" in str(w[1].message))
def check_single_matmul(self, x, y):
def assertEqual(answer, expected):
if x.dtype.is_floating_point or x.dtype.is_complex:
k = max(x.shape[-1], 1) # Scale the atol with the size of the matrix
self.assertEqual(answer, expected,
msg=f"{x.shape} x {y.shape} = {answer.shape}",
atol=k * 5e-5,
rtol=1e-4)
else:
self.assertEqual(answer, expected, msg=f"{x.shape} x {y.shape} = {answer.shape}")
# test x @ y
expected = np.matmul(x.cpu(), y.cpu())
ans = torch.matmul(x, y)
self.assertTrue(ans.is_contiguous())
assertEqual(ans, expected)
# test out
out = torch.empty_like(ans)
ans = torch.matmul(x, y, out=out)
self.assertIs(ans, out)
self.assertTrue(ans.is_contiguous())
assertEqual(ans, expected)
def gen_sizes_matmul(self, x_dim, y_dim=4, matrix_size=4, batch_size=3):
"""
        Generates sequences of size tuples (size_x, size_y) where size_x has x_dim
        dimensions and size_y has at most y_dim dimensions, such that the two shapes
        are compatible w.r.t. matmul
"""
assert x_dim >= 1
assert y_dim >= 2
x = x_dim
for y in range(1, y_dim + 1):
for batch, mn in product(product(range(batch_size), repeat=max(x - 2, y - 2, 0)),
product(range(matrix_size), repeat=min(y, 2))):
if x == 1:
size_x = mn[:1]
size_y = batch + mn
yield size_x, size_y
else:
for k in range(matrix_size):
size_x = (k,) + mn[:1]
if x > 2:
size_x = batch[-(x - 2):] + size_x
size_y = mn
if y > 2:
size_y = batch[-(y - 2):] + size_y
yield size_x, size_y
    @dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul is only supported on CPU
@dtypes(torch.int64, torch.float, torch.complex64)
def test_matmul_small_brute_force_1d_Nd(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(1), (True, False), (True, False)):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
    @dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul is only supported on CPU
@dtypes(torch.int64, torch.float, torch.complex64)
def test_matmul_small_brute_force_2d_Nd(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(2), (True, False), (True, False)):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
    @dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul is only supported on CPU
@dtypes(torch.int64, torch.float, torch.complex64)
def test_matmul_small_brute_force_3d_Nd(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(3), (True, False), (True, False)):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
def test_linear_algebra_scalar_raises(self, device) -> None:
m = torch.randn(5, 5, device=device)
v = torch.randn(5, device=device)
s = torch.tensor(7, device=device)
self.assertRaises(RuntimeError, lambda: torch.mv(m, s))
self.assertRaises(RuntimeError, lambda: torch.addmv(v, m, s))
@dtypes(torch.float32, torch.complex64)
def test_cross(self, device, dtype):
x = torch.rand(100, 3, 100, dtype=dtype, device=device)
y = torch.rand(100, 3, 100, dtype=dtype, device=device)
res1 = torch.cross(x, y)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.cross(x, y, out=res2)
self.assertEqual(res1, res2)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross(self, device, dtype):
x = torch.rand(100, 3, 100, dtype=dtype, device=device)
y = torch.rand(100, 3, 100, dtype=dtype, device=device)
res1 = torch.linalg.cross(x, y, dim=1)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.linalg.cross(x, y, dim=1, out=res2)
self.assertEqual(res1, res2)
# test for broadcastable inputs
x = torch.rand(1, 3, 2, dtype=dtype, device=device)
y = torch.rand(4, 3, 1, dtype=dtype, device=device)
res1 = torch.linalg.cross(x, y, dim=1)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.linalg.cross(x, y, dim=1, out=res2)
self.assertEqual(res1, res2)
@dtypes(torch.float32, torch.complex64)
def test_cross_with_and_without_dim(self, device, dtype):
x = torch.rand(100, 3, dtype=dtype, device=device)
y = torch.rand(100, 3, dtype=dtype, device=device)
res1 = torch.cross(x, y, dim=1)
res2 = torch.cross(x, y, dim=-1)
res3 = torch.cross(x, y)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross_with_and_without_dim(self, device, dtype):
x = torch.rand(100, 3, dtype=dtype, device=device)
y = torch.rand(100, 3, dtype=dtype, device=device)
res1 = torch.linalg.cross(x, y, dim=1)
res2 = torch.linalg.cross(x, y, dim=-1)
res3 = torch.linalg.cross(x, y)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
def test_renorm(self, device):
m1 = torch.randn(20, 20, device=device) # big enough to exercise vectorized path
res1 = torch.tensor((), device=device)
def renorm(matrix, value, dim, max_norm):
m1 = matrix.transpose(dim, 0).contiguous()
# collapse non-dim dimensions.
m2 = m1.clone().resize_(m1.size(0), int(math.floor(m1.nelement() / m1.size(0))))
norms = m2.norm(value, 1, True)
# clip
new_norms = norms.clone()
new_norms[torch.gt(norms, max_norm)] = max_norm
new_norms.div_(norms.add_(1e-7))
# renormalize
m1.mul_(new_norms.expand_as(m1))
return m1.transpose(dim, 0)
# note that the axis fed to torch.renorm is different (2~=1)
maxnorm = m1.norm(2, 1).mean()
m2 = renorm(m1, 2, 1, maxnorm)
m1.renorm_(2, 1, maxnorm)
self.assertEqual(m1, m2, atol=1e-5, rtol=0)
self.assertEqual(m1.norm(2, 0), m2.norm(2, 0), atol=1e-5, rtol=0)
m1 = torch.randn(3, 4, 5, device=device)
m2 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
maxnorm = m2.norm(2, 0).mean()
m2 = renorm(m2, 2, 1, maxnorm)
m1.renorm_(2, 1, maxnorm)
m3 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
self.assertEqual(m3, m2)
self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(*floating_and_complex_types())
def test_ormqr(self, device, dtype):
def run_test(batch, m, n, fortran_contiguous):
A = make_tensor((*batch, m, n), dtype=dtype, device=device)
reflectors, tau = torch.geqrf(A)
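            # geqrf returns Q implicitly: the Householder reflectors are packed below
            # the diagonal of `reflectors` and `tau` holds their scalar factors; ormqr
            # applies Q (or Q^H) to another matrix from that representation without
            # ever forming Q explicitly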
if not fortran_contiguous:
self.assertTrue(reflectors.mT.is_contiguous())
reflectors = reflectors.contiguous()
# Q is of size m x m
Q, _ = torch.linalg.qr(A, mode='complete')
C_right = make_tensor((*batch, m, n), dtype=dtype, device=device)
C_left = make_tensor((*batch, n, m), dtype=dtype, device=device)
expected = Q @ C_right
actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=False)
self.assertEqual(expected, actual)
expected = C_left @ Q
actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=False)
self.assertEqual(expected, actual)
expected = Q.mH @ C_right
actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=True)
self.assertEqual(expected, actual)
expected = C_left @ Q.mH
actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=True)
self.assertEqual(expected, actual)
# if tau is all zeros then the implicit matrix Q is the identity matrix
# so the actual result should be C_right in this case
zero_tau = torch.zeros_like(tau)
actual = torch.ormqr(reflectors, zero_tau, C_right, left=True, transpose=False)
self.assertEqual(C_right, actual)
batches = [(), (0, ), (2, ), (2, 1)]
ns = [5, 2, 0]
for batch, (m, n), fortran_contiguous in product(batches, product(ns, ns), [True, False]):
run_test(batch, m, n, fortran_contiguous)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(*floating_and_complex_types())
def test_ormqr_errors_and_warnings(self, device, dtype):
test_cases = [
# input1 size, input2 size, input3 size, error regex
((10,), (2,), (2,), r"input must have at least 2 dimensions"),
((2, 2), (2,), (2,), r"other must have at least 2 dimensions"),
((10, 6), (20,), (10, 6), r"other.shape\[-2\] must be greater than or equal to tau.shape\[-1\]"),
((6, 6), (5,), (5, 5), r"other.shape\[-2\] must be equal to input.shape\[-2\]"),
((1, 2, 2), (2, 2), (1, 2, 2), r"batch dimensions of tau to be equal to input.shape\[:-2\]"),
((1, 2, 2), (1, 2), (2, 2, 2), r"batch dimensions of other to be equal to input.shape\[:-2\]"),
]
for a_size, tau_size, c_size, error_regex in test_cases:
a = make_tensor(a_size, dtype=dtype, device=device)
tau = make_tensor(tau_size, dtype=dtype, device=device)
c = make_tensor(c_size, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, error_regex):
torch.ormqr(a, tau, c)
def test_blas_empty(self, device):
def fn(torchfn, *args, test_out=False, **kwargs):
def call_torch_fn(*args, **kwargs):
return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
for shape in args), **kwargs)
result = call_torch_fn(*args, **kwargs)
if not test_out:
return result
else:
out = torch.full_like(result, math.nan)
out1 = call_torch_fn(*args, **kwargs, out=out)
return out
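        # Convention exercised below: when the contracted (inner) dimension is zero,
        # the result is a zero-filled tensor of the broadcast output shape, and for the
        # add* variants only the beta * input term survives.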
# mm, addmm
self.assertEqual((0, 0), fn(torch.mm, (0, 0), (0, 0)).shape)
self.assertEqual((0, 5), fn(torch.mm, (0, 0), (0, 5)).shape)
self.assertEqual((5, 0), fn(torch.mm, (5, 0), (0, 0)).shape)
self.assertEqual((3, 0), fn(torch.mm, (3, 2), (2, 0)).shape)
self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6)))
self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6), test_out=True))
self.assertEqual((0, 0), fn(torch.addmm, (0, 0), (0, 0), (0, 0)).shape)
self.assertEqual((0, 1), fn(torch.addmm, (1, ), (0, 17), (17, 1)).shape)
t = torch.randn((5, 6), device=device)
self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6)))
self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6), test_out=True))
# mv, addmv
self.assertEqual((0,), fn(torch.mv, (0, 0), (0,)).shape)
self.assertEqual((0,), fn(torch.mv, (0, 2), (2,)).shape)
self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,)))
self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,), test_out=True))
self.assertEqual((0,), fn(torch.addmv, (0,), (0, 0), (0,)).shape)
t = torch.randn((3,), device=device)
self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,)))
self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,), test_out=True))
# bmm, baddbmm
self.assertEqual((0, 0, 0), fn(torch.bmm, (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((3, 0, 5), fn(torch.bmm, (3, 0, 0), (3, 0, 5)).shape)
self.assertEqual((0, 5, 6), fn(torch.bmm, (0, 5, 0), (0, 0, 6)).shape)
self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6)))
self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6), test_out=True))
self.assertEqual((0, 0, 0), fn(torch.baddbmm, (0, 0, 0), (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((3, 0, 5), fn(torch.baddbmm, (3, 0, 5), (3, 0, 0), (3, 0, 5)).shape)
self.assertEqual((0, 5, 6), fn(torch.baddbmm, (0, 5, 6), (0, 5, 0), (0, 0, 6)).shape)
self.assertEqual((3, 5, 6), fn(torch.baddbmm, (3, 5, 6), (3, 5, 0), (3, 0, 6)).shape)
c = torch.arange(30, dtype=torch.float32, device=device).reshape(3, 2, 5)
self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2)) # Issue #33467
self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2, test_out=True)) # Issue #33467
# addbmm
self.assertEqual((0, 0), fn(torch.addbmm, (0, 0), (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((0, 5), fn(torch.addbmm, (0, 5), (3, 0, 0), (3, 0, 5)).shape)
t = torch.randn((5, 6), device=device)
self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6)))
self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6), test_out=True))
# matmul
self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,)))
self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,), test_out=True))
self.assertEqual((0, 0), fn(torch.matmul, (0, 0), (0, 0)).shape)
self.assertEqual((0, 0, 0), fn(torch.matmul, (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((5, 0, 0), fn(torch.matmul, (5, 0, 0), (5, 0, 0)).shape)
self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4)))
self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4), test_out=True))
# dot
self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,)))
self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,), test_out=True))
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*floating_and_complex_types_and(
torch.half,
*[torch.bfloat16] if SM53OrLater else []
))
@dtypes(*all_types_and_complex_and(torch.bfloat16))
def test_corner_cases_of_cublasltmatmul(self, device, dtype):
# common case
M = torch.randn(128, device=device).to(dtype)
m1 = torch.randn(2048, 2400, device=device).to(dtype)
m2 = torch.randn(128, 2400, device=device).to(dtype)
torch.nn.functional.linear(m1, m2, M)
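        # The cases below build strided views whose leading dimension (the memory stride
        # between consecutive rows/columns) is much larger than the logical matrix size;
        # such inputs have historically hit corner cases in the cublasLt-backed addmm path.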
# Ntrans_B has ld >> rows
m1 = torch.rand([128, 2400]).to(dtype).to(device).t()
m2 = torch.rand([2048, 25272]).to(dtype).to(device).t()[21940:24340]
M = torch.rand([128]).to(dtype).to(device)
torch.addmm(M, m2.t(), m1)
# trans_A has ld >> rows
m1 = torch.rand([128, 25272]).to(dtype).to(device)[:, 21940:24340].t()
m2 = torch.randn(2048, 2400, device=device).to(dtype)
M = torch.rand([128]).to(dtype).to(device)
torch.addmm(M, m2, m1)
# large tensor dim > 65535
M = torch.randn(16, device=device).to(dtype)
        m1 = torch.randn(32, 131071, device=device).to(dtype)
m2 = torch.randn(16, 131071, device=device).to(dtype)
torch.nn.functional.linear(m1, m2, M)
@dtypesIfCUDA(*floating_and_complex_types_and(
torch.half,
*[torch.bfloat16] if SM53OrLater else []
))
@dtypes(*all_types_and_complex_and(torch.bfloat16))
def test_blas_alpha_beta_empty(self, device, dtype):
        # This test is disabled on CUDA 9; see:
        # https://github.com/pytorch/pytorch/issues/31006
if dtype is torch.bfloat16 and self.device_type == 'xla':
# TODO (@zasdfgbnm): this causes the following error on test
# TestTorchDeviceTypeXLA.test_blas_alpha_beta_empty_xla_bfloat16:
#
# RuntimeError: _th_equal not supported on CPUType for BFloat16
return
# ensure beta is respected
value = 11
input = torch.full((2,), value, dtype=dtype, device=device)
mat = torch.ones((2, 0), dtype=dtype, device=device)
vec = torch.ones((0,), dtype=dtype, device=device)
out = torch.empty((2,), dtype=dtype, device=device)
if dtype.is_complex:
alpha = 6 + 7j
beta = 3 + 4j
else:
alpha = 6
beta = 3
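        # With an empty inner dimension the alpha * (mat @ vec) term is an empty sum,
        # i.e. zero, so the expected result reduces to beta * input.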
self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta))
self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta, out=out))
# torch.addmm
input = torch.full((2, 3), value, dtype=dtype, device=device)
mat2 = torch.ones((0, 3), dtype=dtype, device=device)
out = torch.empty((2, 3), dtype=dtype, device=device)
self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta))
self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta, out=out))
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_blas_nan_out(self, device, dtype):
# These functions should work correctly with NaN filled outputs,
# but need special handling, see [NOTE: cpu_zero]
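        # (Roughly: when beta == 0 the output buffer has to be zeroed explicitly rather
        # than scaled, because 0 * NaN is NaN and would otherwise leak into the result.)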
b = 3
n = 5
m = 7
p = 11
# torch.mv
nm = torch.randn((m, n), device=device).t()
_m = torch.randn((), device=device).expand(m)
_m_out = torch.full((m,), float('nan'), device=device)
self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
self.assertEqual(0, torch.isnan(torch.mv(nm, _m)).sum())
# torch.mm
mp = torch.randn((p, m), device=device).t()
np_out = torch.full((n, p), float('nan'), device=device)
self.assertEqual(torch.mm(nm, mp), torch.mm(nm, mp, out=np_out))
# torch.bmm
bnm = torch.randn((b, m, n), device=device).transpose(1, 2)
bmp = torch.randn((b, p, m), device=device).transpose(1, 2)
bnp_out = torch.full((b, n, p), float('nan'), device=device)
self.assertEqual(torch.bmm(bnm, bmp), torch.bmm(bnm, bmp, out=bnp_out))
@onlyCPU # not supported by CUBLAS
def test_blas_mv_large_input(self, device):
# This would previously fail if the allocated output had NaNs, see:
# https://github.com/pytorch/pytorch/issues/31663 and [NOTE: cpu_zero]
n = 3000
m = 200
nm = torch.randn((m, n), device=device).t()
_m = torch.randn((), device=device).expand(m)
_m_out = torch.full((m,), 0., device=device)
self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
@onlyCPU
def test_renorm_ps(self, device):
# full reduction
x = torch.randn(5, 5)
xn = x.numpy()
for p in [1, 2, 3, 4, inf]:
res = x.renorm(p, 1, 1)
expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)
self.assertEqual(res, expected, msg="renorm failed for {}-norm".format(p))
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(*floating_and_complex_types())
def test_householder_product(self, device, dtype):
def generate_reflectors_and_tau(A):
"""
            This function uses numpy.linalg.qr with mode "raw" to extract the output of LAPACK's geqrf.
            There is a torch.geqrf function, but it doesn't work with complex-valued input.
"""
if A.numel() > 0:
A_cpu = A.cpu()
flattened_batch_shape = [-1, *A_cpu.shape[-2:]]
reflectors = torch.empty_like(A_cpu).view(*flattened_batch_shape)
tau_shape = [*A_cpu.shape[:-2], A_cpu.shape[-1]]
tau = torch.empty(tau_shape, dtype=dtype).view(-1, A_cpu.shape[-1])
for A_i, reflectors_i, tau_i in zip(A_cpu.contiguous().view(*flattened_batch_shape), reflectors, tau):
reflectors_tmp, tau_i[:] = map(torch.from_numpy, np.linalg.qr(A_i, mode='raw'))
reflectors_i[:] = reflectors_tmp.T
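                    # np.linalg.qr(mode='raw') returns the packed reflectors transposed
                    # (Fortran/column-major layout as produced by LAPACK), hence the .T
                    # to recover the layout torch expects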
reflectors = reflectors.view(*A_cpu.shape)
tau = tau.view(tau_shape)
return reflectors.to(A.device), tau.to(A.device)
reflectors = torch.empty_like(A)
tau = torch.empty(*A.shape[:-2], A.shape[-1], dtype=dtype, device=device)
return reflectors, tau
def run_test(shape):
A = torch.randn(*shape, dtype=dtype, device=device)
reflectors, tau = generate_reflectors_and_tau(A)
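            # householder_product materializes the explicit Q encoded by the reflectors
            # and tau, so it should match the Q computed by torch.linalg.qr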
expected, _ = torch.linalg.qr(A)
actual = torch.linalg.householder_product(reflectors, tau)
# torch.linalg.qr does not work correctly for zero batch dimension tensors
# see https://github.com/pytorch/pytorch/issues/50576
if (A.numel() > 0):
self.assertEqual(expected, actual)
else:
self.assertTrue(actual.shape == shape)
            # if tau is empty and A is not, the result should be a matrix with ones on the diagonal
if (A.numel() > 0):
tau_empty = torch.empty(*shape[:-2], 0, dtype=dtype, device=device)
identity_mat = torch.zeros_like(reflectors)
identity_mat.diagonal(dim1=-1, dim2=-2)[:] = 1
actual = torch.linalg.householder_product(reflectors, tau_empty)
self.assertEqual(actual, identity_mat)
out = torch.empty_like(A)
ans = torch.linalg.householder_product(reflectors, tau, out=out)
self.assertEqual(ans, out)
if (A.numel() > 0):
self.assertEqual(expected, out)
shapes = [(0, 0), (5, 0), # Empty matrix
(5, 5), (5, 3), # Single matrix
(0, 0, 0), (0, 5, 5), (0, 5, 3), # Zero batch dimension tensors
(2, 5, 5), (2, 5, 3), # 3-dim tensors
(2, 1, 5, 5), (2, 1, 5, 3)] # 4-dim tensors
for shape in shapes:
run_test(shape)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
def test_householder_product_errors_and_warnings(self, device):
test_cases = [
# input1 size, input2 size, error regex
((10,), (2,), r"input must have at least 2 dimensions"),
((10, 6), (20,), r"input.shape\[-1\] must be greater than or equal to tau.shape\[-1\]"),
((6, 10), (5,), r"input.shape\[-2\] must be greater than or equal to input.shape\[-1\]"),
]
for a_size, tau_size, error_regex in test_cases:
a = torch.rand(*a_size, device=device)
tau = torch.rand(*tau_size, device=device)
with self.assertRaisesRegex(RuntimeError, error_regex):
torch.linalg.householder_product(a, tau)
# if out tensor with wrong shape is passed a warning is given
reflectors = torch.randn(3, 3, device=device)
tau = torch.randn(3, device=device)
out = torch.empty(2, 3, device=device)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.householder_product(reflectors, tau, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out = torch.empty_like(reflectors).to(torch.int)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.linalg.householder_product(reflectors, tau, out=out)
with self.assertRaisesRegex(RuntimeError, "tau dtype Int does not match input dtype"):
torch.linalg.householder_product(reflectors, tau.to(torch.int))
if torch.cuda.is_available():
# device of out and input should match
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty_like(reflectors).to(wrong_device)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.linalg.householder_product(reflectors, tau, out=out)
# device of tau and input should match
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
tau = tau.to(wrong_device)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.linalg.householder_product(reflectors, tau)
@precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2})
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_linalg_lu_family(self, device, dtype):
# Tests torch.lu
# torch.linalg.lu_factor
# torch.linalg.lu_factor_ex
# torch.lu_unpack
# torch.linalg.lu_solve
# torch.linalg.solve
make_arg_full = partial(make_fullrank_matrices_with_distinct_singular_values, device=device, dtype=dtype)
make_arg = partial(make_tensor, device=device, dtype=dtype)
def run_test(A, pivot, singular, fn):
k = min(A.shape[-2:])
batch = A.shape[:-2]
check_errors = (fn == torch.linalg.lu_factor)
if singular and check_errors:
# It may or may not throw as the LU decomposition without pivoting
# may still succeed for singular matrices
try:
LU, pivots = fn(A, pivot=pivot)
except RuntimeError:
return
else:
LU, pivots = fn(A, pivot=pivot)[:2]
self.assertEqual(LU.size(), A.shape)
self.assertEqual(pivots.size(), batch + (k,))
if not pivot:
self.assertEqual(pivots, torch.arange(1, 1 + k, device=device, dtype=torch.int32).expand(batch + (k, )))
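            # `pivots` holds LAPACK-style 1-based row interchanges; lu_unpack turns it,
            # together with the packed LU matrix, into explicit P, L, U factors so we
            # can verify P @ L @ U == A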
P, L, U = torch.lu_unpack(LU, pivots, unpack_pivots=pivot)
self.assertEqual(P @ L @ U if pivot else L @ U, A)
PLU = torch.linalg.lu(A, pivot=pivot)
self.assertEqual(P, PLU.P)
self.assertEqual(L, PLU.L)
self.assertEqual(U, PLU.U)
if not singular and A.size(-2) == A.size(-1):
nrhs = ((), (1,), (3,))
for left, rhs in product((True, False), nrhs):
# Vector case when left = False is not allowed
if not left and rhs == ():
continue
if left:
shape_B = A.shape[:-1] + rhs
else:
shape_B = A.shape[:-2] + rhs + A.shape[-1:]
B = make_arg(shape_B)
# Test linalg.lu_solve. It does not support vectors as rhs
# See https://github.com/pytorch/pytorch/pull/74045#issuecomment-1112304913
if rhs != ():
for adjoint in (True, False):
X = torch.linalg.lu_solve(LU, pivots, B, left=left, adjoint=adjoint)
A_adj = A.mH if adjoint else A
if left:
self.assertEqual(B, A_adj @ X)
else:
self.assertEqual(B, X @ A_adj)
# Test linalg.solve
X = torch.linalg.solve(A, B, left=left)
X_ = X.unsqueeze(-1) if rhs == () else X
B_ = B.unsqueeze(-1) if rhs == () else B
if left:
self.assertEqual(B_, A @ X_)
else:
self.assertEqual(B_, X_ @ A)
sizes = ((3, 3), (5, 5), (4, 2), (3, 4), (0, 0), (0, 1), (1, 0))
batches = ((0,), (), (1,), (2,), (3,), (1, 0), (3, 5))
        # Non-pivoting is only implemented on CUDA
pivots = (True, False) if self.device_type == "cuda" else (True,)
fns = (partial(torch.lu, get_infos=True), torch.linalg.lu_factor, torch.linalg.lu_factor_ex)
for ms, batch, pivot, singular, fn in itertools.product(sizes, batches, pivots, (True, False), fns):
shape = batch + ms
A = make_arg(shape) if singular else make_arg_full(*shape)
# Just do one of them on singular matrices
if A.numel() == 0 and not singular:
continue
run_test(A, pivot, singular, fn)
# Reproducer of a magma bug,
# see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on
# This is also a bug in cuSOLVER < 11.3
if (dtype == torch.double
and singular
and (torch.version.cuda is None or
torch.version.cuda.split('.') >= ["11", "3"])):
A = torch.ones(batch + ms, dtype=dtype, device=device)
run_test(A, pivot, singular, fn)
# Info should be positive for rank deficient matrices
A = torch.ones(5, 3, 3, device=device)
self.assertTrue((torch.linalg.lu_factor_ex(A, pivot=True).info >= 0).all())
if self.device_type == 'cpu':
# Error checking, no pivoting variant on CPU
fns = [torch.lu, torch.linalg.lu_factor, torch.linalg.lu_factor_ex, torch.linalg.lu]
for f in fns:
with self.assertRaisesRegex(RuntimeError, 'LU without pivoting is not implemented on the CPU'):
f(torch.empty(1, 2, 2), pivot=False)
@precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2})
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@setLinalgBackendsToDefaultFinally
@dtypes(*floating_and_complex_types())
def test_linalg_lu_solve(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
backends = ["default"]
if torch.device(device).type == 'cuda':
if torch.cuda.has_magma:
backends.append("magma")
if has_cusolver():
backends.append("cusolver")
def gen_matrices():
rhs = 3
ns = (5, 2, 0)
batches = ((), (0,), (1,), (2,), (2, 1), (0, 2))
for batch, n in product(batches, ns):
yield make_arg(batch + (n, n)), make_arg(batch + (n, rhs))
# Shapes to exercise all the paths
shapes = ((1, 64), (2, 128), (1025, 2))
for b, n in shapes:
yield make_arg((b, n, n)), make_arg((b, n, rhs))
for A, B in gen_matrices():
LU, pivots = torch.linalg.lu_factor(A)
for backend in backends:
torch.backends.cuda.preferred_linalg_library(backend)
for left, adjoint in product((True, False), repeat=2):
B_left = B if left else B.mT
X = torch.linalg.lu_solve(LU, pivots, B_left, left=left, adjoint=adjoint)
A_adj = A.mH if adjoint else A
if left:
self.assertEqual(B_left, A_adj @ X)
else:
self.assertEqual(B_left, X @ A_adj)
@onlyCPU
@dtypes(*floating_and_complex_types())
def test_linalg_lu_cpu_errors(self, device, dtype):
# Square tests
sample = torch.randn(3, 2, 2, device=device, dtype=dtype)
B = torch.randn(3, 2, 2, device=device, dtype=dtype)
LU, pivots = torch.linalg.lu_factor(sample)
# This should run without issues
torch.linalg.lu_solve(LU, pivots, B, adjoint=True)
torch.lu_unpack(LU, pivots)
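        # LAPACK pivots are 1-based indices into the rows of LU, so 0 and anything
        # larger than LU.size(-2) are out of range and must be rejected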
pivots[0] = 0
with self.assertRaisesRegex(RuntimeError, r"greater or equal to 1"):
torch.linalg.lu_solve(LU, pivots, B, adjoint=True)
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
pivots[0] = 3
with self.assertRaisesRegex(RuntimeError, r"smaller or equal to LU.size\(-2\)"):
torch.linalg.lu_solve(LU, pivots, B, adjoint=True)
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
# Rectangular tests
sample = torch.randn(3, 4, 2, device=device, dtype=dtype)
B = torch.randn(3, 4, 2, device=device, dtype=dtype)
LU, pivots = torch.linalg.lu_factor(sample)
# This should run without issues
torch.lu_unpack(LU, pivots)
pivots[0] = 0
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
pivots[0] = 5
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
# Rectangular tests
sample = torch.randn(2, 3, 5, device=device, dtype=dtype)
B = torch.randn(2, 3, 5, device=device, dtype=dtype)
LU, pivots = torch.linalg.lu_factor(sample)
# This should run without issues
torch.lu_unpack(LU, pivots)
pivots[0] = 0
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
pivots[0] = 4
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double)
def test_lu_unpack_check_input(self, device, dtype):
x = torch.rand(5, 5, 5, device=device, dtype=dtype)
lu_data, lu_pivots = torch.linalg.lu_factor(x)
with self.assertRaisesRegex(RuntimeError, "torch.int32 dtype"):
torch.lu_unpack(lu_data, lu_pivots.long())
        # check that when the unpack flags are unset, empty tensors are returned
p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False)
self.assertTrue(l.numel() == 0 and u.numel() == 0)
p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_pivots=False)
self.assertTrue(p.numel() == 0)
p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False, unpack_pivots=False)
self.assertTrue(p.numel() == 0 and l.numel() == 0 and u.numel() == 0)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_lobpcg_basic(self, device, dtype):
self._test_lobpcg_method(device, dtype, 'basic')
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_lobpcg_ortho(self, device, dtype):
self._test_lobpcg_method(device, dtype, 'ortho')
def _test_lobpcg_method(self, device, dtype, method):
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
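        # LOBPCG iteratively computes the k extreme eigenpairs of the (generalized)
        # symmetric problem A x = lambda B x without factorizing A; the tracker below
        # inspects the worker state on every iteration to check convergence and
        # B-orthogonality of the current iterate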
def test_tracker(worker):
k = worker.iparams['k']
nc = worker.ivars['converged_count']
if k <= nc:
tol = worker.fparams['tol']
rerr = worker.tvars['rerr']
X = worker.X
E = worker.E
B = worker.B
A = worker.A
dtype = X.dtype
device = X.device
# Check convergence
self.assertLessEqual(rerr[:k].max(), tol)
# Check B-orthogonality
I = torch.eye(k, k, dtype=dtype, device=device)
self.assertEqual(qform(B, X[:, :k]), I)
# Check block equation
self.assertEqual(qform(A, X[:, :k]) / E[:k], I, atol=0.2, rtol=0)
orig_lobpcg = lobpcg
def lobpcg(*args, **kwargs):
kwargs['tracker'] = test_tracker
kwargs['niter'] = 1000
kwargs['method'] = method
kwargs['tol'] = 1e-8
return orig_lobpcg(*args, **kwargs)
prec = 5e-4
# check dense input
mm = torch.matmul
for batches in [(), (2,), (2, 3)]:
for m, n, k in [
(9, 3, 1),
(9, 3, 2),
(9, 2, 2),
(100, 15, 5),
]:
# skip tests that are known to fail with the basic
# LOBPCG method due to calling cholesky on singular
# input
if method == 'basic' and (m, n, k) in [(9, 2, 2), (100, 15, 5)]:
continue
A = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
B = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
# classical eigenvalue problem, smallest eigenvalues
E, V = lobpcg(A, k=k, n=n, largest=False)
self.assertEqual(E.shape, batches + (k,))
self.assertEqual(V.shape, batches + (m, k))
self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
e = torch.linalg.eigvalsh(A)
e_smallest = e[..., :k]
self.assertEqual(E, e_smallest)
# classical eigenvalue problem, largest eigenvalues
E, V = lobpcg(A, k=k, n=n, largest=True)
e_largest, _ = torch.sort(e[..., -k:], descending=True)
self.assertEqual(E, e_largest, atol=prec, rtol=0)
self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
# generalized eigenvalue problem, smallest eigenvalues
E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
self.assertEqual(matmul(A, V), mm(matmul(B, V), E.diag_embed()), atol=prec, rtol=0)
# generalized eigenvalue problem, largest eigenvalues
E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
atol=prec, rtol=0)
# check sparse input
for m, n, k, density in [
(5, 1, 1, 0.8),
(9, 3, 2, 0.5),
(100, 1, 1, 0.1),
(1000, 7, 3, 0.01),
]:
            # skip tests that are known to fail with the basic LOBPCG
# method due to insufficient accuracy
if method == 'basic' and (m, n, k, density) in [(1000, 7, 3, 0.01)]:
continue
A = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
B = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
A_eigenvalues = torch.arange(1, m + 1, dtype=dtype) / m
e_smallest = A_eigenvalues[..., :k]
e_largest, _ = torch.sort(A_eigenvalues[..., -k:], descending=True)
# classical eigenvalue problem, smallest eigenvalues
E, V = lobpcg(A, k=k, n=n, largest=False)
self.assertEqual(E, e_smallest)
self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
# classical eigenvalue problem, largest eigenvalues
E, V = lobpcg(A, k=k, n=n, largest=True)
self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
self.assertEqual(E, e_largest)
# generalized eigenvalue problem, smallest eigenvalues
E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
self.assertEqual(matmul(A, V), matmul(B, mm(V, E.diag_embed())), atol=prec, rtol=0)
# generalized eigenvalue problem, largest eigenvalues
E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
atol=prec, rtol=0)
@skipCPUIfNoLapack
@onlyCPU
@dtypes(torch.double)
def test_lobpcg_torchscript(self, device, dtype):
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
lobpcg = torch.jit.script(torch.lobpcg)
m = 500
k = 5
A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
X1 = torch.randn((m, k), dtype=dtype, device=device)
E1, V1 = lobpcg(A1, X=X1)
eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
self.assertLess(eq_err, 1e-6)
@unittest.skipIf(not TEST_SCIPY or (TEST_SCIPY and scipy.__version__ < '1.4.1'), "Scipy not found or older than 1.4.1")
@skipCPUIfNoLapack
@onlyCPU
@dtypes(torch.double)
def test_lobpcg_scipy(self, device, dtype):
"""Compare torch and scipy.sparse.linalg implementations of lobpcg
"""
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
def toscipy(A):
if A.layout == torch.sparse_coo:
values = A.coalesce().values().cpu().numpy().copy()
indices = A.coalesce().indices().cpu().numpy().copy()
return scipy.sparse.coo_matrix((values, (indices[0], indices[1])), A.shape)
return A.cpu().numpy().copy()
niter = 1000
repeat = 10
m = 500 # size of the square matrix
k = 7 # the number of requested eigenpairs
A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
B1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
X1 = torch.randn((m, k), dtype=dtype, device=device)
A2 = toscipy(A1)
B2 = toscipy(B1)
X2 = toscipy(X1)
lambdas1 = []
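# torch.lobpcg invokes tracker once per iteration; recording the eigenvalue history lets us
# compare iteration counts with scipy's retLambdaHistory output below.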
def tracker(worker):
lambdas1.append(worker.E[:])
tol = 1e-8
# tol for scipy lobpcg is chosen so that the number of
# iterations will be equal to or very close to pytorch lobpcg
# (that is around 170-180)
# Standard eigenvalue problem
E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=1.1 * tol)
iters1 = len(lambdas1)
iters2 = len(lambdas2)
self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))
E2a, V2a = scipy_lobpcg(A2, X2, maxiter=niter, largest=False)
eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
self.assertLess(eq_err, 1e-6) # std
self.assertLess(eq_err_scipy, 1e-6) # std
self.assertEqual(E1, torch.from_numpy(E2.copy()))
# Generalized eigenvalue problem
lambdas1 = []
def tracker(worker):
lambdas1.append(worker.E[:])
E1, V1 = torch.lobpcg(A1, B=B1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=39 * tol)
E2a, V2a = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=False)
iters1 = len(lambdas1)
iters2 = len(lambdas2)
self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))
eq_err = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
eq_err_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
self.assertLess(eq_err, 1e-6) # general
self.assertLess(eq_err_scipy, 1e-6) # general
self.assertEqual(E1, torch.from_numpy(E2.copy()))
# Timings
elapsed_ortho = 0
elapsed_ortho_general = 0
elapsed_scipy = 0
elapsed_general_scipy = 0
for i in range(repeat):
start = time.time()
torch.lobpcg(A1, X=X1, niter=niter, method='ortho', tol=tol)
end = time.time()
elapsed_ortho += end - start
start = time.time()
torch.lobpcg(A1, X=X1, B=B1, niter=niter, method='ortho', tol=tol)
end = time.time()
elapsed_ortho_general += end - start
start = time.time()
scipy_lobpcg(A2, X2, maxiter=niter, tol=1.1 * tol)
end = time.time()
elapsed_scipy += end - start
start = time.time()
scipy_lobpcg(A2, X2, B=B2, maxiter=niter, tol=39 * tol)
end = time.time()
elapsed_general_scipy += end - start
elapsed_ortho_ms = 1000.0 * elapsed_ortho / repeat
elapsed_ortho_general_ms = 1000.0 * elapsed_ortho_general / repeat
elapsed_scipy_ms = 1000.0 * elapsed_scipy / repeat
elapsed_general_scipy_ms = 1000.0 * elapsed_general_scipy / repeat
print('''
CPU timings: torch.lobpcg vs scipy.sparse.linalg.lobpcg
-------------------------------------------------------
| standard | generalized | method
torch.lobpcg | {:10.2f} | {:10.2f} | ortho
scipy_lobpcg | {:10.2f} | {:10.2f} | N/A
-(input size: {:4}, eigenpairs:{:2}, units: ms per call)-
'''.format(elapsed_ortho_ms, elapsed_ortho_general_ms,
elapsed_scipy_ms, elapsed_general_scipy_ms,
m, k))
# Handling of very small tolerance
tol = 1e-100
lambdas1 = []
def tracker(worker):
lambdas1.append(worker.E[:])
E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
iters1 = len(lambdas1)
eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
try:
E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
iters2 = len(lambdas2)
eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
except Exception as msg:
print('Calling scipy_lobpcg failed [standard]:', msg)
iters2 = -1
eq_err_scipy = -1
lambdas1 = []
def tracker(worker):
lambdas1.append(worker.E[:])
E1, V1 = torch.lobpcg(A1, X=X1, B=B1, niter=niter, largest=True, tracker=tracker, tol=tol)
iters1_general = len(lambdas1)
eq_err_general = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
try:
E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
iters2_general = len(lambdas2)
eq_err_general_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
except Exception as msg:
print('Calling scipy_lobpcg failed [generalized]:', msg)
iters2_general = -1
eq_err_general_scipy = -1
print('''\
Handling of small tol={:6.0e}: torch.lobpcg vs scipy.sparse.linalg.lobpcg
----------------------------------------------------------------------------
| standard | generalized | niter | method
torch.lobpcg | {:10.2e} | {:10.2e} | {:6} | ortho
scipy_lobpcg | {:10.2e} | {:10.2e} | {:6} | N/A
---(input size: {:4}, eigenpairs:{:2}, units: relative error, maxiter={:4})---
'''.format(tol, eq_err, eq_err_general, iters1, eq_err_scipy, eq_err_general_scipy, iters2, m, k, niter))
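# Shared helper for addmm/addmv-style ops: runs f(t, m, v, alpha=..., beta=...), checks the out=
# variant (optionally through a transposed output buffer), and compares both against a numpy
# reference computed in float32 when the dtype is bfloat16. activation="relu" additionally
# applies a relu to the reference (used for torch._addmm_activation).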
def _test_addmm_addmv(self, f, t, m, v, *, alpha=None, beta=None, transpose_out=False, activation=None):
dtype = t.dtype
numpy_dtype = dtype
if dtype in {torch.bfloat16}:
numpy_dtype = torch.float
if dtype.is_complex:
alpha = 0.9 + 0.3j if alpha is None else alpha
beta = 0.5 + 0.6j if beta is None else beta
else:
alpha = 1.2 if alpha is None else alpha
beta = 0.8 if beta is None else beta
res1 = f(t, m, v, alpha=alpha, beta=beta)
res2 = torch.full_like(res1, math.nan)
if transpose_out:
res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
f(t, m, v, alpha=alpha, beta=beta, out=res2)
res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
if beta != 0:
res3 += (beta * t).to(numpy_dtype).cpu().numpy()
if activation == "relu":
res3 = res3 * (res3 > 0)
else:
assert activation is None, f"unsupported activation {activation}"
res3 = torch.from_numpy(res3).to(dtype)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
@precisionOverride({torch.bfloat16: 1e-0, torch.half: 5e-4, torch.float: 1e-4, torch.double: 1e-8,
torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.bfloat16] if TEST_WITH_ROCM or SM53OrLater else [],
torch.half))
@dtypes(torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_addmv(self, device, dtype):
# have to use torch.randn(...).to(bfloat16) instead of
# torch.randn(..., dtype=bfloat16). randn does not support
# bfloat16 yet.
# "*0.2" to reduce errors for low precision
ts = [
0.2 * torch.randn(50, device=device).to(dtype),
0.2 * torch.randn(1, device=device).to(dtype).expand(50),
]
vs = [
0.2 * torch.randn(100, device=device).to(dtype),
0.2 * torch.ones(1, device=device).to(dtype).expand(100), # to reduce errors for low precision
]
ms = [
# 0d
0.2 * torch.ones((), device=device).to(dtype).expand(50, 100), # to reduce errors for low precision
# 1d
0.2 * torch.randn((1, 100), device=device).to(dtype).expand(50, 100),
# this initialization reduces errors for low precision for broadcasted matrices
# by making sure that intermediate and result values are exactly representable
# in low precision type
0.2 * torch.randint(3, (50, 1), dtype=torch.float, device=device).to(dtype).expand(50, 100),
# 2d
0.2 * torch.randn((50, 100), device=device).to(dtype),
0.2 * torch.randn((100, 50), device=device).to(dtype).t(),
]
for m, v, t in itertools.product(ms, vs, ts):
self._test_addmm_addmv(torch.addmv, t, m, v)
# Test beta=0, t=nan
t = torch.full((50,), math.nan, device=device).to(dtype)
for m, v in itertools.product(ms, vs):
self._test_addmm_addmv(torch.addmv, t, m, v, beta=0)
@dtypesIfCUDA(*floating_types_and(*[torch.bfloat16] if TEST_WITH_ROCM or
SM53OrLater else []))
@dtypes(torch.float, torch.double)
def test_addmv_rowmajor_colmajor_incx_incy_lda(self, device, dtype):
# tests (o, s)*(s). o is output size, s is summed size.
o = 5
s = 3
a_data = torch.arange(1, o * s + 1, device=device, dtype=dtype).view(o, s)
x_data = torch.arange(1, s + 1, 1, device=device, dtype=dtype)
y_data = torch.ones(o, device=device, dtype=dtype)
control = torch.tensor([15., 33., 51., 69., 87.], device=device, dtype=dtype)
def _test(row_major, incx, incy, lda_tail):
if row_major:
a_storage = torch.full((o, s + lda_tail), float('nan'), device=device, dtype=dtype)
else:
a_storage = torch.full((s, o + lda_tail), float('nan'), device=device, dtype=dtype).permute(1, 0)
a = a_storage[:o, :s].copy_(a_data)
x_storage = torch.full((s, incx), float('nan'), device=device, dtype=dtype)
x = x_storage[:, 0].copy_(x_data)
y_storage = torch.full((o, incy), float('nan'), device=device, dtype=dtype)
y = y_storage[:, 0].copy_(y_data)
self._test_addmm_addmv(torch.addmv, y, a, x)
for row_major, incx, incy, lda_tail in itertools.product((False, True), (1, 2), (1, 2), (0, 1)):
_test(row_major, incx, incy, lda_tail)
def _test_addmm_impl(self, func, activation, device, dtype):
M = torch.randn(10, 25, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(func, M, m1, m2, activation=activation)
# Test 0-strided
M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(func, M, m1, m2, activation=activation)
# Test beta=0, M=nan
M = torch.full((10, 25), math.nan, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(func, M, m1, m2, beta=0, activation=activation)
# Test transpose
for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
self._test_addmm_addmv(func, M, m1, m2, transpose_out=t4, activation=activation)
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfMPS(torch.float32)
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.bfloat16] if TEST_WITH_ROCM or SM53OrLater else []))
@dtypes(*floating_and_complex_types_and(torch.bfloat16))
@tf32_on_and_off(0.05)
def test_addmm(self, device, dtype):
self._test_addmm_impl(torch.addmm, None, device, dtype)
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*floating_types_and(
*[torch.bfloat16] if TEST_WITH_ROCM or SM53OrLater else []))
@dtypes(*floating_types_and(torch.bfloat16))
@tf32_on_and_off(0.05)
def test_addmm_activation(self, device, dtype):
self._test_addmm_impl(torch._addmm_activation, "relu", device, dtype)
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(*floating_and_complex_types())
@tf32_on_and_off(0.005)
def test_addmm_sizes(self, device, dtype):
for m in [0, 1, 25]:
for n in [0, 1, 10]:
for k in [0, 1, 8]:
M = torch.randn(n, m, device=device).to(dtype)
m1 = torch.randn(n, k, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
m1 = torch.randn(n, k + 1, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@dtypes(torch.half)
@onlyCUDA
def test_addmm_baddbmm_overflow(self, device, dtype):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
inp = torch.zeros(128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(1000, 128, dtype=torch.half, device=device) * 100
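# With fp32 accumulation each output element is 0.001 * (1000 * 100 * 100) = 10000, which is
# representable in fp16; a reduced-precision (fp16) accumulator would overflow on the
# intermediate sum of ~1e7 (fp16 max is ~65504), hence the flag is disabled above.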
out = torch.addmm(inp, mat1, mat2, alpha=0.001, beta=0.)
# just check for no overflow on ROCM
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
inp = torch.zeros(3, 128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(3, 128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(3, 1000, 128, dtype=torch.half, device=device) * 100
out = torch.baddbmm(inp, mat1, mat2, alpha=0.001, beta=0.)
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
@onlyCUDA
def test_matmul_45724(self, device):
# https://github.com/pytorch/pytorch/issues/45724
a = torch.rand(65537, 22, 64, device=device, dtype=torch.half)
b = torch.rand(65537, 64, 22, device=device, dtype=torch.half)
c = torch.full((65537, 22, 22), math.nan, dtype=torch.half, device=device)
cpu_result = torch.matmul(a.cpu().float(), b.cpu().float()).cuda().half()
torch.matmul(a, b, out=c)
self.assertEqual(c, cpu_result)
@slowTest
@onlyNativeDeviceTypes
# bfloat16 doesn't have sufficient precision to pass this test
@dtypes(torch.float32, torch.float64, torch.int32, torch.int64, torch.cfloat, torch.cdouble)
@dtypesIfCUDA(torch.float32, torch.float64, torch.cfloat, torch.cdouble)
@tf32_on_and_off(0.01)
def test_mm(self, device, dtype):
def _test_mm(n, m, p, dtype, genf):
# helper function
def matrixmultiply(mat1, mat2):
n = mat1.size(0)
m = mat1.size(1)
p = mat2.size(1)
res = torch.zeros(n, p, dtype=dtype, device=device)
for i, j in iter_indices(res):
res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
return res
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 1
mat1 = genf(n, m)
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 2
mat1 = genf(m, n).t()
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# test with zero stride
mat1 = genf(n, m)
mat2 = genf(m, 1).expand(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
def genf_int(x, y):
return torch.randint(0, 100, (x, y), dtype=dtype, device=device)
def genf_bfloat(x, y):
return torch.randn(x, y, dtype=torch.float32, device=device).to(dtype) * 0.1
def genf_float(x, y):
return torch.randn(x, y, dtype=dtype, device=device)
for (n, m, p) in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]:
if (dtype == torch.int32) or (dtype == torch.int64):
genf = genf_int
elif (dtype == torch.bfloat16):
genf = genf_bfloat
else:
genf = genf_float
_test_mm(n, m, p, dtype, genf)
@onlyNativeDeviceTypes
def test_mm_bmm_non_memory_dense(self, device):
def _slice(tensor, fn):
return fn(tensor)[..., ::2]
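# The [..., ::2] slice makes the conjugated inputs non-memory-dense, so mm/bmm must handle the
# lazy conj bit correctly: torch.conj (lazy) and torch.conj_physical (materialized) are
# required to produce identical results below.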
A = torch.randn(3, 6, dtype=torch.cfloat, device=device)
B = torch.randn(3, 3, dtype=torch.cfloat, device=device)
out = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
out1 = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
A_conj = _slice(A, torch.conj)
A_conj_physical = _slice(A, torch.conj_physical)
self.assertEqual(torch.mm(A_conj, B, out=out), torch.mm(A_conj_physical, B, out=out))
self.assertEqual(torch.mm(A_conj.t(), B, out=out), torch.mm(A_conj_physical.t(), B, out=out))
Ab = torch.randn(2, 3, 6, dtype=torch.cfloat, device=device)
Bb = torch.randn(2, 3, 3, dtype=torch.cfloat, device=device)
Bb_ = torch.randn(1, 3, 3, dtype=torch.cfloat, device=device).expand(2, 3, 3)
out_b = torch.empty(2, 3, 3, device=device, dtype=torch.complex64).mT
Ab_conj = _slice(Ab, torch.conj)
Ab_conj_physical = _slice(Ab, torch.conj_physical)
def t_b(tensor):
return tensor.mT
self.assertEqual(torch.bmm(Ab_conj, Bb, out=out_b), torch.bmm(Ab_conj_physical, Bb, out=out_b))
self.assertEqual(torch.bmm(t_b(Ab_conj), Bb, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb, out=out_b))
# test broadcasting
self.assertEqual(torch.bmm(Ab_conj, Bb_, out=out_b), torch.bmm(Ab_conj_physical, Bb_, out=out_b))
self.assertEqual(torch.bmm(t_b(Ab_conj), Bb_, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb_, out=out_b))
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_strided_mm_bmm(self, device, dtype):
# Tests strided view case with stride smaller than corresponding dimension size
x = torch.tensor([[1., 2., 3.], [4., 5., 6.]], dtype=dtype, device=device)
new_shape = [2, 2, 2]
new_stride = [3, 1, 1]
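# With size (2, 2, 2) and stride (3, 1, 1), element [i, j, k] maps to x.view(-1)[3*i + j + k],
# so the last two dimensions overlap in memory; results are compared against numpy.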
sx = torch.as_strided(x, size=new_shape, stride=new_stride)
torch_fn = lambda x: torch.bmm(x, x) # noqa: E731
np_fn = lambda x: np.matmul(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx)
torch_fn = lambda x: torch.mm(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx[0])
@precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
@onlyNativeDeviceTypes
@dtypes(*floating_and_complex_types_and(torch.bfloat16))
@tf32_on_and_off(0.05)
def test_bmm(self, device, dtype):
if self.device_type == 'cuda' and dtype is torch.bfloat16 and not SM53OrLater:
# cuBLAS does not guarantee BFloat16 support on SM < 53.
# So on PyTorch, we consider BFloat16 support on SM < 53 as
# undefined behavior
return
batch_sizes = [1, 10]
M, N, O = 23, 15, 12
numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
is_supported = True
if dtype == torch.bfloat16 and self.device_type == 'cuda':
is_supported = TEST_WITH_ROCM or SM53OrLater
if not is_supported:
for num_batches in batch_sizes:
b1 = torch.randn(num_batches, M, N, device=device).to(dtype)
b2 = torch.randn(num_batches, N, O, device=device).to(dtype)
self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
lambda: torch.bmm(b1, b2))
return
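# invert_perm returns the inverse permutation, so that
# b.permute(perm).contiguous().permute(invert_perm(perm)) keeps the original logical shape
# while giving the tensor a permuted, non-contiguous memory layout.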
def invert_perm(p):
d = {x: i for i, x in enumerate(p)}
return (d[0], d[1], d[2])
def generate_inputs(num_batches):
# transposed tensors
for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-0.1, high=0.1)
b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-0.1, high=0.1)
b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
yield b1, b2
# broadcasting tensors
for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
b1 = make_tensor(shape1, dtype=dtype, device=device, low=-0.1, high=0.1).expand(num_batches, M, N)
b2 = make_tensor(shape2, dtype=dtype, device=device, low=-0.1, high=0.1).expand(num_batches, N, O)
yield b1, b2
# zero-sized tensors
for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
b1 = torch.randn(shape1, dtype=dtype, device=device)
b2 = torch.randn(shape2, dtype=dtype, device=device)
yield b1, b2
for num_batches in batch_sizes:
for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))):
res1 = torch.bmm(b1, b2)
res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \
.permute(perm3).contiguous().permute(invert_perm(perm3))
torch.bmm(b1, b2, out=res2)
expect = torch.from_numpy(
b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
self.assertEqual(expect, res1)
self.assertEqual(expect, res2)
if self.device_type == 'cuda':
# check that mixed arguments are rejected
self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu()))
self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2))
self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu()))
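# Shared helper for addbmm/baddbmm: exercises the in-place method, the deprecated positional
# beta/alpha overloads (which must emit a deprecation warning), the keyword beta/alpha form,
# and beta=0 with a NaN-filled input (whose NaNs must be ignored), all against the precomputed ref.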
def _test_addbmm_baddbmm(self, func, b1, b2, ref, out_tensor):
getattr(out_tensor, func + "_")(b1, b2)
self.assertEqual(out_tensor, ref)
res3 = out_tensor.clone()
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func}_ is deprecated"):
getattr(out_tensor, func + "_")(1, b1, b2)
self.assertEqual(out_tensor, ref * 2)
getattr(res3, func + "_")(b1, b2, beta=1)
self.assertEqual(out_tensor, res3)
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func}_ is deprecated"):
getattr(out_tensor, func + "_")(1., .5, b1, b2)
self.assertEqual(out_tensor, ref * 2.5)
getattr(res3, func + "_")(b1, b2, beta=1., alpha=.5)
self.assertEqual(out_tensor, res3)
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func} is deprecated"):
self.assertEqual(out_tensor, getattr(torch, func)(1, out_tensor, 0, b1, b2))
res4 = getattr(torch, func)(out_tensor, b1, b2, beta=1, alpha=.5)
self.assertEqual(res4, ref * 3)
nan = torch.full_like(out_tensor, math.nan)
res5 = getattr(torch, func)(nan, b1, b2, beta=0, alpha=1)
self.assertEqual(res5, ref)
if b1.is_complex():
res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1j, alpha=.5j)
self.assertEqual(res6, out_tensor * .1j + .5j * ref)
else:
res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1, alpha=.5)
self.assertEqual(res6, out_tensor * .1 + .5 * ref)
res7 = torch.full_like(out_tensor, math.nan)
getattr(torch, func)(nan, b1, b2, beta=0, out=res7)
self.assertEqual(res7, ref)
@precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
@onlyNativeDeviceTypes
@dtypes(*floating_and_complex_types_and(torch.bfloat16))
@tf32_on_and_off(0.05)
def test_addbmm(self, device, dtype):
if self.device_type == 'cuda' and dtype is torch.bfloat16 and not SM53OrLater:
# cuBLAS does not guarantee BFloat16 support on SM < 53.
# So on PyTorch, we consider BFloat16 support on SM < 53 as
# undefined behavior
return
num_batches = 2
M, N, O = 16, 17, 18
is_supported = True
if dtype == torch.bfloat16:
if self.device_type == 'cpu':
self.precision = 1 # 43 vs 43.75
else:
is_supported = TEST_WITH_ROCM or SM53OrLater
if not is_supported:
b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
t = make_tensor((M, O), dtype=dtype, device=device, low=-1, high=1)
self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
lambda: torch.addbmm(t, b1, b2))
return
def invert_perm(p):
d = {x: i for i, x in enumerate(p)}
return (d[0], d[1], d[2])
def generate_tensor():
numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
# transposed tensors
for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
for perm3 in itertools.permutations((0, 1)):
b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1) * 0.1
b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1) * 0.1
b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
ref = torch.from_numpy(
b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
).to(device=device, dtype=dtype).sum(0)
out_tensor = torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3)
yield b1, b2, ref, out_tensor
# broadcasting tensors
for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, M, N) * 0.1
b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, N, O) * 0.1
ref = torch.from_numpy(
b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
).to(device=device, dtype=dtype).sum(0)
out_tensor = torch.zeros_like(ref)
yield b1, b2, ref, out_tensor
# zero-sized tensors
for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1) * 0.1
b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1) * 0.1
ref = torch.from_numpy(
b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
).to(device=device, dtype=dtype).sum(0)
out_tensor = torch.zeros_like(ref)
yield b1, b2, ref, out_tensor
for b1, b2, ref, out_tensor in generate_tensor():
self._test_addbmm_baddbmm("addbmm", b1, b2, ref, out_tensor)
@precisionOverride({torch.half: 0.1, torch.bfloat16: 0.5})
@onlyNativeDeviceTypes
@dtypes(*floating_and_complex_types_and(torch.bfloat16))
@tf32_on_and_off(0.05)
def test_baddbmm(self, device, dtype):
if self.device_type == 'cuda' and dtype is torch.bfloat16 and not SM53OrLater:
# cuBLAS does not guarantee BFloat16 support on SM < 53.
# So on PyTorch, we consider BFloat16 support on SM < 53 as
# undefined behavior
return
num_batches = 10
M, N, O = 12, 8, 50
is_supported = True
if dtype == torch.bfloat16 and self.device_type == 'cuda':
is_supported = TEST_WITH_ROCM or SM53OrLater
if not is_supported:
b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
t = make_tensor((num_batches, M, O), dtype=dtype, device=device, low=-1, high=1)
self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
lambda: torch.baddbmm(t, b1, b2))
return
def invert_perm(p):
d = {x: i for i, x in enumerate(p)}
return (d[0], d[1], d[2])
def generate_tensor():
numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
# transposed tensors
for perm1, perm2, perm3 in itertools.product(itertools.permutations((0, 1, 2)), repeat=3):
b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
ref = torch.from_numpy(
b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
out_tensor = torch.zeros_like(ref)
out_tensor = out_tensor.permute(perm3).contiguous().permute(invert_perm(perm3))
yield b1, b2, ref, out_tensor
# broadcasting tensors
for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, M, N)
b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, N, O)
ref = torch.from_numpy(
b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
out_tensor = torch.zeros_like(ref)
yield b1, b2, ref, out_tensor
# zero-sized tensors
for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
b1 = make_tensor(shape1, dtype=dtype, device=device, low=-2, high=2)
b2 = make_tensor(shape2, dtype=dtype, device=device, low=-2, high=2)
ref = torch.from_numpy(
b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
out_tensor = torch.zeros_like(ref)
yield b1, b2, ref, out_tensor
for b1, b2, ref, out_tensor in generate_tensor():
self._test_addbmm_baddbmm("baddbmm", b1, b2, ref, out_tensor)
@precisionOverride({torch.float32: 5e-3, torch.complex64: 1e-3})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_pinverse(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, device=device, dtype=dtype)
def run_test(M):
# Testing against definition for pseudo-inverses
MPI = torch.pinverse(M)
MPI_ = MPI.cpu().numpy()
M_ = M.cpu().numpy()
if M.numel() > 0:
self.assertEqual(M_, np.matmul(np.matmul(M_, MPI_), M_))
self.assertEqual(MPI_, np.matmul(np.matmul(MPI_, M_), MPI_))
self.assertEqual(np.matmul(M_, MPI_), np.matmul(M_, MPI_).swapaxes(-2, -1).conj())
self.assertEqual(np.matmul(MPI_, M_), np.matmul(MPI_, M_).swapaxes(-2, -1).conj())
else:
self.assertEqual(M.shape, MPI.shape[:-2] + (MPI.shape[-1], MPI.shape[-2]))
for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5), # square matrices
(3, 2), (5, 3, 2), (7, 5, 3, 2), # fat matrices
(2, 3), (5, 2, 3), (7, 5, 2, 3), # thin matrices
(0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]: # zero numel matrices
M = torch.randn(*sizes, dtype=dtype, device=device)
run_test(M)
# Test inverse and pseudo-inverse for invertible matrix
for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5)]:
matsize = sizes[-1]
batchdims = sizes[:-2]
M = make_arg(*batchdims, matsize, matsize)
self.assertEqual(torch.eye(matsize, dtype=dtype, device=device).expand(sizes), M.pinverse().matmul(M),
atol=1e-7, rtol=0, msg='pseudo-inverse for invertible matrix')
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_non_negative(self, device, dtype):
def check(*size):
t = make_tensor(size, dtype=dtype, device=device)
for n in range(8):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0, 0)
check(1, 1)
check(5, 5)
check(0, 3, 3)
check(2, 3, 3)
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_negative(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, device=device, dtype=dtype)
def check(*size):
t = make_arg(*size)
for n in range(-7, 0):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0, 0)
check(5, 5)
check(2, 0, 0)
check(0, 3, 3)
check(2, 3, 3)
check(2, 3, 5, 5)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.complex64)
def test_linalg_matrix_exp_utils(self, device, dtype):
# test linear combination
def run_test(coeff_shape, data_shape):
coeffs = torch.rand(*coeff_shape, device=device, dtype=torch.float)
x = torch.rand(coeff_shape[1], *data_shape, device=device, dtype=dtype)
res1 = torch._compute_linear_combination(x, coeffs)
res2 = (x.unsqueeze(0) * coeffs.view(*coeff_shape, *([1] * len(data_shape)))).sum(1)
self.assertEqual(res1, res2, atol=1e-5, rtol=0.0)
# check `out=` version
res3 = torch.zeros(coeff_shape[0], *data_shape, device=device, dtype=dtype)
torch._compute_linear_combination(x, coeffs, out=res3)
self.assertEqual(res1, res3, atol=1e-5, rtol=0.0)
res4 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
torch._compute_linear_combination(x, coeffs, out=res4)
self.assertEqual(res1, res4 - 1.0, atol=1e-5, rtol=0.0)
res5 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
res5_clone = res5.clone()
torch._compute_linear_combination(x, coeffs, out=res5)
self.assertEqual(res1, res5 - res5_clone, atol=1e-5, rtol=0.0)
run_test([1, 3], [2, 2])
run_test([3, 1], [2, 2])
run_test([1, 10], [10, 10])
run_test([10, 1], [10, 10])
run_test([5, 3], [2, 2])
run_test([5, 3], [100, 100])
run_test([3, 4], [3, 3, 3])
run_test([3, 4], [3, 3, 3, 3])
# Regression test for https://github.com/pytorch/pytorch/issues/94124
with self.assertRaises(RuntimeError):
x = torch.rand([], device=device, dtype=dtype)
coeffs = torch.rand([2, 2], device=device, dtype=dtype)
res = torch._compute_linear_combination(x, coeffs)
@onlyCPU
@skipCPUIfNoLapack
@dtypes(torch.complex64)
def test_linalg_matrix_exp_no_warnings(self, device, dtype):
# this tests https://github.com/pytorch/pytorch/issues/80948
with freeze_rng_state():
torch.manual_seed(42)
tens = 0.5 * torch.randn(10, 3, 3, dtype=dtype, device=device)
tens = (0.5 * (tens.transpose(-1, -2) + tens))
with warnings.catch_warnings(record=True) as w:
tens.imag = torch.matrix_exp(tens.imag)
self.assertFalse(len(w))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_linalg_matrix_exp_boundary_cases(self, device, dtype):
expm = torch.linalg.matrix_exp
with self.assertRaisesRegex(RuntimeError, "Expected a floating point or complex tensor"):
expm(torch.randn(3, 3).type(torch.int))
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
expm(torch.randn(3))
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
expm(torch.randn(3, 2, 1))
# check 1x1 matrices
x = torch.randn(3, 3, 1, 1)
self.assertEqual(expm(x), x.exp())
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_matrix_exp_analytic(self, device, dtype):
expm = torch.linalg.matrix_exp
# check zero matrix
x = torch.zeros(20, 20, dtype=dtype, device=device)
self.assertTrue((expm(x) == torch.eye(20, 20, dtype=dtype, device=device)).all().item())
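# The helper below rescales each matrix to a target induced 1-norm; .abs().sum(-2).max(-1)
# computes the maximum absolute column sum, i.e. the operator 1-norm of every matrix in the batch.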
def normalize_to_1_operator_norm(sample, desired_norm):
sample_norm, _ = sample.abs().sum(-2).max(-1)
sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
return sample_to_1_norm * desired_norm
def gen_good_cond_number_matrices(*n):
"""
Generates a diagonally-dominant matrix
with the eigenvalues centered at 1
and the radii at most (n[-1] - 1) / (n[-2] ** 2)
"""
identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
x = (x - x * identity) + identity
return x
def run_test(*n):
if dtype == torch.float:
thetas = [
1.192092800768788e-07, # deg 1
5.978858893805233e-04, # deg 2
5.116619363445086e-02, # deg 4
5.800524627688768e-01, # deg 8
1.461661507209034e+00, # deg 12
3.010066362817634e+00 # deg 18
]
else: # if torch.double
thetas = [
2.220446049250313e-16, # deg 1
2.580956802971767e-08, # deg 2
3.397168839976962e-04, # deg 4
4.991228871115323e-02, # deg 8
2.996158913811580e-01, # deg 12
1.090863719290036e+00 # deg 18
]
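# The theta values above are the norm thresholds at which matrix_exp is expected to switch
# between approximation degrees (1, 2, 4, 8, 12, 18); sample_norms generated further below sit
# between consecutive thresholds, plus one below the smallest and one above the largest, so
# every degree and the scaling-and-squaring path get exercised.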
# generate input
q = gen_good_cond_number_matrices(*n)
q_ = q.cpu().numpy()
qinv = torch.inverse(q)
qinv_ = qinv.cpu().numpy()
d = torch.randn(n[:-1], dtype=dtype, device=device)
x = torch.from_numpy(
np.matmul(q_, np.matmul(torch.diag_embed(d).cpu().numpy(), qinv_))).to(device)
x_norm, _ = x.abs().sum(-2).max(-1)
# test simple analytic whatever norm generated
mexp = expm(x)
mexp_analytic = np.matmul(
q_,
np.matmul(
torch.diag_embed(d.exp()).cpu().numpy(),
qinv_
)
)
self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)
# generate norms to test different degree expansions
sample_norms = []
for i in range(len(thetas) - 1):
sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
# matrices to equal norm
for sample_norm in sample_norms:
x_normalized = normalize_to_1_operator_norm(x, sample_norm)
mexp = expm(x_normalized)
mexp_analytic = np.matmul(
q_,
np.matmul(
torch.diag_embed((d / x_norm.unsqueeze(-1) * sample_norm).exp()).cpu().numpy(),
qinv_
)
)
self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)
# single matrix
run_test(2, 2)
run_test(3, 3)
run_test(4, 4)
run_test(5, 5)
run_test(100, 100)
run_test(200, 200)
# small batch of matrices
run_test(3, 2, 2)
run_test(3, 3, 3)
run_test(3, 4, 4)
run_test(3, 5, 5)
run_test(3, 100, 100)
run_test(3, 200, 200)
# large batch of matrices
run_test(3, 3, 2, 2)
run_test(3, 3, 3, 3)
run_test(3, 3, 4, 4)
run_test(3, 3, 5, 5)
run_test(3, 3, 100, 100)
run_test(3, 3, 200, 200)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
def test_linalg_matrix_exp_batch(self, device, dtype):
def run_test(*n):
tensors_batch = torch.zeros(n, dtype=dtype, device=device)
tensors_batch = tensors_batch.view(-1, n[-2], n[-1])
num_matrices = tensors_batch.size(0)
tensors_list = []
for i in range(num_matrices):
tensors_list.append(torch.randn(n[-2], n[-1], dtype=dtype, device=device))
for i in range(num_matrices):
tensors_batch[i, ...] = tensors_list[i]
tensors_exp_map = (torch.linalg.matrix_exp(x) for x in tensors_list)
tensors_exp_batch = torch.linalg.matrix_exp(tensors_batch)
for i, tensor_exp in enumerate(tensors_exp_map):
self.assertEqual(tensors_exp_batch[i, ...], tensor_exp)
# small batch of matrices
run_test(3, 2, 2)
run_test(3, 3, 3)
run_test(3, 4, 4)
run_test(3, 5, 5)
# large batch of matrices
run_test(3, 3, 2, 2)
run_test(3, 3, 3, 3)
run_test(3, 3, 4, 4)
run_test(3, 3, 5, 5)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_matrix_exp_compare_with_taylor(self, device, dtype):
def normalize_to_1_operator_norm(sample, desired_norm):
sample_norm, _ = sample.abs().sum(-2).max(-1)
sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
return sample_to_1_norm * desired_norm
def gen_good_cond_number_matrices(*n):
"""
Generates a diagonally-dominant matrix
with the eigenvalues centered at 1
and the radii at most (n[-1] - 1) / (n[-2] ** 2)
"""
identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
x = (x - x * identity) + identity
return x
def get_taylor_approximation(a, deg):
a_ = a.cpu().numpy()
identity = torch.eye(a.size(-2), a.size(-1), dtype=dtype, device=device).expand_as(a)
res = identity.cpu().numpy()
taylor_term = identity.cpu().numpy()
for i in range(1, deg + 1):
taylor_term = np.matmul(a_, taylor_term) / i
res = res + taylor_term
return res
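# scale_square below is a reference scaling-and-squaring routine: for Frobenius norm >= 1 it
# divides A by 2**s so the norm drops below 1, applies a degree-18 Taylor series, then squares
# the result s times, using exp(A) = exp(A / 2**s) ** (2**s); smaller matrices use a degree-12
# Taylor series directly.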
def scale_square(a, deg):
if a.abs().pow(2).sum().sqrt() < 1.0:
return get_taylor_approximation(a, 12)
else:
s = int(torch.log2(a.abs().pow(2).sum().sqrt()).ceil().item())
b = a / (2 ** s)
b = get_taylor_approximation(b, 18)
for _ in range(s):
b = np.matmul(b, b)
return torch.from_numpy(b).to(a.device)
def run_test(*n):
degs = [1, 2, 4, 8, 12, 18]
if dtype == torch.float:
thetas = [
1.192092800768788e-07, # deg 1
5.978858893805233e-04, # deg 2
5.116619363445086e-02, # deg 4
5.800524627688768e-01, # deg 8
1.461661507209034e+00, # deg 12
3.010066362817634e+00 # deg 18
]
else: # if torch.double
thetas = [
2.220446049250313e-16, # deg 1
2.580956802971767e-08, # deg 2
3.397168839976962e-04, # deg 4
4.991228871115323e-02, # deg 8
2.996158913811580e-01, # deg 12
1.090863719290036e+00 # deg 18
]
# generate norms to test different degree expansions
sample_norms = []
for i in range(len(thetas) - 1):
sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
degs = [degs[0]] + degs
for sample_norm, deg in zip(sample_norms, degs):
x = gen_good_cond_number_matrices(*n)
x = normalize_to_1_operator_norm(x, sample_norm)
mexp = torch.linalg.matrix_exp(x)
mexp_taylor = scale_square(x, deg)
self.assertEqual(mexp, mexp_taylor, atol=1e-2, rtol=0.0)
# single matrix
run_test(2, 2)
run_test(3, 3)
run_test(4, 4)
run_test(5, 5)
# small batch of matrices
run_test(3, 2, 2)
run_test(3, 3, 3)
run_test(3, 4, 4)
run_test(3, 5, 5)
# large batch of matrices
run_test(3, 3, 2, 2)
run_test(3, 3, 3, 3)
run_test(3, 3, 4, 4)
run_test(3, 3, 5, 5)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_slogdet(self, device, dtype):
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
# mat_chars denotes matrix characteristics
# possible values are: hermitian, hermitian_psd, hermitian_pd, singular, non_singular
def run_test(matsize, batchdims, mat_chars):
num_matrices = np.prod(batchdims)
list_of_matrices = []
if num_matrices != 0:
for idx in range(num_matrices):
mat_type = idx % len(mat_chars)
if mat_chars[mat_type] == 'hermitian':
list_of_matrices.append(random_hermitian_matrix(matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'hermitian_psd':
list_of_matrices.append(random_hermitian_psd_matrix(matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'hermitian_pd':
list_of_matrices.append(random_hermitian_pd_matrix(matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'singular':
list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'non_singular':
list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
else:
full_tensor = torch.randn(*batchdims, matsize, matsize, dtype=dtype, device=device)
actual_value = torch.linalg.slogdet(full_tensor)
expected_value = np.linalg.slogdet(full_tensor.cpu().numpy())
self.assertEqual(expected_value[0], actual_value[0], atol=self.precision, rtol=self.precision)
self.assertEqual(expected_value[1], actual_value[1], atol=self.precision, rtol=self.precision)
# test out=variant
sign_out = torch.empty_like(actual_value[0])
logabsdet_out = torch.empty_like(actual_value[1])
ans = torch.linalg.slogdet(full_tensor, out=(sign_out, logabsdet_out))
self.assertEqual(ans[0], sign_out)
self.assertEqual(ans[1], logabsdet_out)
self.assertEqual(sign_out, actual_value[0])
self.assertEqual(logabsdet_out, actual_value[1])
for matsize, batchdims in itertools.product([0, 3, 5], [(0,), (3,), (5, 3)]):
run_test(matsize, batchdims, mat_chars=['hermitian_pd'])
run_test(matsize, batchdims, mat_chars=['singular'])
run_test(matsize, batchdims, mat_chars=['non_singular'])
run_test(matsize, batchdims, mat_chars=['hermitian', 'hermitian_pd', 'hermitian_psd'])
run_test(matsize, batchdims, mat_chars=['singular', 'non_singular'])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_slogdet_errors_and_warnings(self, device, dtype):
# slogdet requires the input to be a square matrix or batch of square matrices
a = torch.randn(2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
torch.linalg.slogdet(a)
# slogdet requires the input to be at least a 2-dimensional tensor
a = torch.randn(2, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
torch.linalg.slogdet(a)
a = torch.randn(2, 2, device=device, dtype=torch.bfloat16)
with self.assertRaisesRegex(RuntimeError, r'Low precision dtypes not supported'):
torch.linalg.slogdet(a)
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.randn(2, 3, 3, device=device, dtype=dtype)
sign_out = torch.empty(1, device=device, dtype=dtype)
real_dtype = a.real.dtype if dtype.is_complex else dtype
logabsdet_out = torch.empty(1, device=device, dtype=real_dtype)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
sign_out = torch.empty(0, device=wrong_device, dtype=dtype)
logabsdet_out = torch.empty(0, device=wrong_device, dtype=real_dtype)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
@skipCUDAIf(torch.version.cuda is not None
and torch.version.cuda.split(".") < ["11", "3"], "There's a bug in cuSOLVER < 11.3")
# FIXME One of the backends of lu_factor fails in windows. I haven't investigated which or why
# https://github.com/pytorch/pytorch/issues/75225
@unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_det_logdet_slogdet(self, device, dtype):
def reference_slogdet(M):
sdet, logabsdet = np.linalg.slogdet(M.detach().cpu().numpy())
return M.new_tensor(sdet), M.new_tensor(logabsdet)
def test_single_det(M, target, desc):
target_sdet, target_logabsdet = target
det = M.det()
logdet = M.logdet()
sdet, logabsdet = M.slogdet()
linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)
# Test det
self.assertEqual(det, target_sdet * target_logabsdet.exp(),
atol=1e-6, rtol=0, msg='{} (det)'.format(desc))
# Test slogdet
# Compare the overall value rather than individual parts because of
# precision issues when det is near zero.
self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
atol=1e-6, rtol=0, msg='{} (slogdet)'.format(desc))
self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
atol=1e-6, rtol=0, msg='{} (linalg_slogdet)'.format(desc))
# Test logdet
# Compare logdet against our own pytorch slogdet because they should
# be consistent, while it may behave slightly differently with other
# slogdet implementations when det is near zero due to precision
# issues.
if sdet.item() < 0:
self.assertTrue(logdet.item() != logdet.item(), '{} (logdet negative case)'.format(desc))
else:
self.assertEqual(logdet.exp(), target_logabsdet.exp(),
atol=1e-6, rtol=0, msg='{} (logdet non-negative case)'.format(desc))
eye = torch.eye(5, dtype=dtype, device=device)
test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
# Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)
for n in range(250, 551, 100):
mat = torch.randn(n, n, dtype=dtype, device=device)
q, _ = torch.qr(mat)
ref_det, ref_logabsdet = reference_slogdet(q)
test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')
def test(M):
assert M.size(0) >= 5, 'this helper fn assumes M to be at least 5x5'
M = M.to(device)
ref_M_sdet, ref_M_logabsdet = reference_slogdet(M)
test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'basic')
if ref_M_logabsdet.exp().item() >= 1e-6: # skip singular
M_inv = M.inverse()
test_single_det(M_inv, reference_slogdet(M_inv), 'inverse')
test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'transpose')
for x in [0, 2, 4]:
for scale in [-2, -0.1, 0, 10]:
if scale > 0:
target = ref_M_sdet, ref_M_logabsdet + math.log(scale)
elif scale == 0:
target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
else:
target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-scale)
# dim 0
M_clone = M.clone()
M_clone[:, x] *= scale
test_single_det(M_clone, target, 'scale a row')
# dim 1
M_clone = M.clone()
M_clone[x, :] *= scale
test_single_det(M_clone, target, 'scale a column')
for x1, x2 in [(0, 3), (4, 1), (3, 2)]:
assert x1 != x2, 'x1 and x2 need to be different for this test'
target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
# dim 0
M_clone = M.clone()
M_clone[:, x2] = M_clone[:, x1]
test_single_det(M_clone, target, 'two rows are same')
# dim 1
M_clone = M.clone()
M_clone[x2, :] = M_clone[x1, :]
test_single_det(M_clone, target, 'two columns are same')
for scale1, scale2 in [(0.3, -1), (0, 2), (10, 0.1)]:
det_scale = scale1 * scale2 * -1
if det_scale > 0:
target = ref_M_sdet, ref_M_logabsdet + math.log(det_scale)
elif det_scale == 0:
target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
else:
target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-det_scale)
# dim 0
M_clone = M.clone()
t = M_clone[:, x1] * scale1
M_clone[:, x1] += M_clone[:, x2] * scale2
M_clone[:, x2] = t
test_single_det(M_clone, target, 'exchanging rows')
# dim 1
M_clone = M.clone()
t = M_clone[x1, :] * scale1
M_clone[x1, :] += M_clone[x2, :] * scale2
M_clone[x2, :] = t
test_single_det(M_clone, target, 'exchanging columns')
def get_random_mat_scale(n):
# For matrices with values i.i.d. with 0 mean, unit variance, and
# subexponential tail, we have:
# E[log det(A^2)] \approx log((n-1)!)
#
# Notice:
# log Var[det(A)] = log E[det(A^2)] >= E[log det(A^2)]
#
# So:
# stddev[det(A)] >= sqrt( (n-1)! )
#
# We use this as an intuitive guideline to scale randomly generated
# matrices so our closeness tests can work more robustly:
# scale by sqrt( (n-1)! )^(-1/n) = ( (n-1)! )^(-1/(2n))
#
# source: https://arxiv.org/pdf/1112.0752.pdf
# TODO: technically we need a subexponential distribution for this to hold,
# but we mostly use gaussian entries below. Consider switching
# to Chi-sq if this turns out not stable enough, since Chi-sq
# is easy enough to sample from.
return math.factorial(n - 1) ** (-1.0 / (2 * n))
for n in [5, 10, 25]:
scale = get_random_mat_scale(n)
test(torch.randn(n, n, dtype=dtype, device=device) * scale)
r = torch.randn(n, n, dtype=dtype, device=device) * scale
# symmetric psd
test(r.mm(r.t()))
# symmetric pd
r = torch.randn(n, n, dtype=dtype, device=device) * scale
test(r.mm(r.t()) + torch.eye(n, dtype=dtype, device=device) * 1e-6)
# symmetric
r = torch.randn(n, n, dtype=dtype, device=device) * scale
for i in range(n):
for j in range(i):
r[i, j] = r[j, i]
test(r)
# non-contiguous
test((torch.randn(n, n, n + 1, dtype=dtype, device=device) * scale)[:, 2, 1:])
# det = 0
r = torch.randn(n, n, dtype=dtype, device=device) * scale
u, s, v = r.svd()
if reference_slogdet(u)[0] < 0:
u = -u
if reference_slogdet(v)[0] < 0:
v = -v
s[0] *= -1
s[-1] = 0
test(u.mm(s.diag()).mm(v))
# Small values to test numerical stability. Note that we don't scale
# this matrix.
r = torch.randn(512, 512, dtype=dtype, device=device)
u, s, v = r.svd()
s.fill_(1. / (100 * s.numel()))
test(u.mm(s.diag()).mm(v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_det_logdet_slogdet_batched(self, device, dtype):
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
# mat_chars denotes matrix characteristics
# possible values are: sym, sym_psd, sym_pd, sing, non_sym
def run_test(matsize, batchdims, mat_chars):
num_matrices = reduce(lambda x, y: x * y, batchdims, 1)
list_of_matrices = []
for idx in range(num_matrices):
mat_type = idx % len(mat_chars)
if mat_chars[mat_type] == 'sym':
list_of_matrices.append(random_symmetric_matrix(matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'sym_psd':
list_of_matrices.append(random_symmetric_psd_matrix(matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'sym_pd':
list_of_matrices.append(random_symmetric_pd_matrix(matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'sing':
list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'non_sing':
list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
# Scaling adapted from `get_random_mat_scale` in _test_det_logdet_slogdet
full_tensor *= (math.factorial(matsize - 1) ** (-1.0 / (2 * matsize)))
for fn in [torch.det, torch.logdet, torch.slogdet, torch.linalg.slogdet]:
expected_value = []
actual_value = fn(full_tensor)
for full_idx in itertools.product(*map(lambda x: list(range(x)), batchdims)):
expected_value.append(fn(full_tensor[full_idx]))
if fn == torch.slogdet or fn == torch.linalg.slogdet:
sign_value = torch.stack([tup[0] for tup in expected_value], dim=0).reshape(batchdims)
expected_value = torch.stack([tup[1] for tup in expected_value], dim=0).reshape(batchdims)
self.assertEqual(sign_value, actual_value[0])
self.assertEqual(expected_value, actual_value[1])
else:
expected_value = torch.stack(expected_value, dim=0).reshape(batchdims)
self.assertEqual(actual_value, expected_value)
for matsize, batchdims in itertools.product([3, 5], [(3,), (5, 3)]):
run_test(matsize, batchdims, mat_chars=['sym_pd'])
run_test(matsize, batchdims, mat_chars=['sing'])
run_test(matsize, batchdims, mat_chars=['non_sing'])
run_test(matsize, batchdims, mat_chars=['sym', 'sym_pd', 'sym_psd'])
run_test(matsize, batchdims, mat_chars=['sing', 'non_sing'])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_inverse(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(shape, batch, upper, contiguous):
A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
if A.numel() > 0 and not contiguous:
A = A.mT
self.assertFalse(A.is_contiguous())
L = torch.linalg.cholesky(A)
expected_inverse = torch.inverse(A)
L = L.mH if upper else L
actual_inverse = torch.cholesky_inverse(L, upper)
self.assertEqual(actual_inverse, expected_inverse)
shapes = (0, 3, 5)
batches = ((), (0,), (3, ), (2, 2))
for shape, batch, upper, contiguous in list(itertools.product(shapes, batches, (True, False), (True, False))):
run_test(shape, batch, upper, contiguous)
# check the out= variant
A = random_hermitian_pd_matrix(3, 2, dtype=dtype, device=device)
L = torch.linalg.cholesky(A)
# There are two code paths currently for the out= variant
# 1. When 'out' tensor is in Fortran (column-major) memory format
# then the fast route is taken and the storage is reused directly in the computations
# 2. When 'out' tensor is not in Fortran format then a temporary tensor is allocated internally
# and the result is copied from the temporary tensor to 'out' tensor
# This test checks the first code path
out = torch.empty_like(A)
out_t = out.mT.clone(memory_format=torch.contiguous_format)
out = out_t.mT
ans = torch.cholesky_inverse(L, out=out)
self.assertEqual(ans, out)
expected = torch.inverse(A)
self.assertEqual(expected, out)
# This test checks the second code path
out = torch.empty_like(A)
ans = torch.cholesky_inverse(L, out=out)
self.assertEqual(ans, out)
expected = torch.inverse(A)
self.assertEqual(expected, out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_inverse_errors_and_warnings(self, device, dtype):
# cholesky_inverse requires the input to be at least a 2-dimensional tensor
a = torch.randn(2, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
torch.cholesky_inverse(a)
# cholesky_inverse requires a square matrix
a = torch.randn(2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.cholesky_inverse(a)
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.randn(3, 3, device=device, dtype=dtype)
out = torch.empty(2, 3, device=device, dtype=dtype)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.cholesky_inverse(a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out = torch.empty(*a.shape, dtype=torch.int, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.cholesky_inverse(a, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, device=wrong_device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.cholesky_inverse(a, out=out)
# cholesky_inverse raises an error for invalid inputs on CPU
# for example if at least one diagonal element is zero
a = torch.randn(3, 3, device=device, dtype=dtype)
a[1, 1] = 0
if self.device_type == 'cpu':
with self.assertRaisesRegex(torch.linalg.LinAlgError, r"cholesky_inverse: The diagonal element 2 is zero"):
torch.cholesky_inverse(a)
# cholesky_inverse on GPU does not raise an error for this case
elif self.device_type == 'cuda':
out = torch.cholesky_inverse(a)
self.assertTrue(out.isinf().any() or out.isnan().any())
def _select_broadcastable_dims(self, dims_full=None):
# select full dimensionality
if dims_full is None:
dims_full = []
ndims = random.randint(1, 4)
dims_full = [random.randint(1, 8) for _ in range(ndims)]
else:
ndims = len(dims_full)
# select actual dimensions for ops:
# larger: full ndims, individual sizes may be reduced
# smaller: possibly reduced ndims, sizes may be reduced
smaller_ndims = random.randint(1, ndims)
dims_small = []
dims_large = []
for i in range(ndims - 1, -1, -1):
j = random.randint(1, 3)
if j == 1: # no reduced singleton dimension
ds = dims_full[i]
dl = dims_full[i]
elif j == 2: # larger may have reduced singleton dimension
ds = dims_full[i]
dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
elif j == 3: # smaller may have reduced singleton dimension
ds = 1
dl = dims_full[i]
dims_large = [dl] + dims_large
if len(dims_small) < smaller_ndims:
dims_small = [ds] + dims_small
return (dims_small, dims_large, dims_full)
def test_broadcast_fused_matmul(self, device):
fns = ["baddbmm", "addbmm", "addmm", "addmv", "addr"]
for fn in fns:
batch_dim = random.randint(1, 8)
n_dim = random.randint(1, 8)
m_dim = random.randint(1, 8)
p_dim = random.randint(1, 8)
def dims_full_for_fn():
if fn == "baddbmm":
return ([batch_dim, n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
elif fn == "addbmm":
return ([n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
elif fn == "addmm":
return ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim])
elif fn == "addmv":
return ([n_dim], [n_dim, m_dim], [m_dim])
elif fn == "addr":
return ([n_dim, m_dim], [n_dim], [m_dim])
else:
raise AssertionError("unknown function")
(t0_dims_full, t1_dims, t2_dims) = dims_full_for_fn()
(t0_dims_small, _, _) = self._select_broadcastable_dims(t0_dims_full)
t0_small = torch.randn(*t0_dims_small, device=device).float()
t1 = torch.randn(*t1_dims, device=device).float()
t2 = torch.randn(*t2_dims, device=device).float()
t0_full = t0_small.expand(*t0_dims_full).to(device)
fntorch = getattr(torch, fn)
r0 = fntorch(t0_small, t1, t2)
r1 = fntorch(t0_full, t1, t2)
self.assertEqual(r0, r1)
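    def _demo_addmm_add_term_broadcast():
        # Minimal standalone sketch of the property checked above; the helper name and
        # shapes are arbitrary and only `import torch` is assumed. The add-term of a fused
        # matmul broadcasts, so a (1, p) bias and its expanded (n, p) version agree.
        m1 = torch.randn(3, 4, dtype=torch.float64)
        m2 = torch.randn(4, 5, dtype=torch.float64)
        bias = torch.randn(1, 5, dtype=torch.float64)
        expected = torch.addmm(bias.expand(3, 5), m1, m2)
        assert torch.allclose(torch.addmm(bias, m1, m2), expected)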
@tf32_on_and_off(0.001)
def test_broadcast_batched_matmul(self, device):
n_dim = random.randint(1, 8)
m_dim = random.randint(1, 8)
p_dim = random.randint(1, 8)
full_batch_dims = [random.randint(1, 3) for i in range(random.randint(1, 3))]
(batch_dims_small, _, _) = self._select_broadcastable_dims(full_batch_dims)
def verify_batched_matmul(full_lhs, one_dimensional):
if not one_dimensional:
lhs_dims = [n_dim, m_dim]
rhs_dims = [m_dim, p_dim]
result_dims = [n_dim, p_dim]
else:
lhs_dims = [n_dim, m_dim] if full_lhs else [m_dim]
rhs_dims = [m_dim, p_dim] if not full_lhs else [m_dim]
result_dims = [n_dim] if full_lhs else [p_dim]
lhs_mat_dims = lhs_dims if len(lhs_dims) != 1 else [1, m_dim]
rhs_mat_dims = rhs_dims if len(rhs_dims) != 1 else [m_dim, 1]
full_mat_dims = lhs_mat_dims if full_lhs else rhs_mat_dims
dim0_dims = rhs_dims if full_lhs else lhs_dims
small_dims = batch_dims_small + (rhs_mat_dims if full_lhs else lhs_mat_dims)
small = torch.randn(*(small_dims), device=device).float()
dim0 = torch.randn(*(dim0_dims), device=device).float()
full = torch.randn(*(full_batch_dims + full_mat_dims), device=device).float()
if not one_dimensional:
(lhsTensors, rhsTensors) = ((full,), (small, dim0)) if full_lhs else ((small, dim0), (full,))
else:
(lhsTensors, rhsTensors) = ((full,), (dim0,)) if full_lhs else ((dim0,), (full,))
def maybe_squeeze_result(l, r, result):
if len(lhs_dims) == 1 and l.dim() != 1:
return result.squeeze(-2)
elif len(rhs_dims) == 1 and r.dim() != 1:
return result.squeeze(-1)
else:
return result
for lhs in lhsTensors:
lhs_expanded = lhs.expand(*(torch.Size(full_batch_dims) + torch.Size(lhs_mat_dims)))
lhs_expanded_matmul_fn = lhs_expanded.matmul
for rhs in rhsTensors:
rhs_expanded = ((rhs if len(rhs_dims) != 1 else rhs.unsqueeze(-1)).
expand(*(torch.Size(full_batch_dims) + torch.Size(rhs_mat_dims))))
truth = maybe_squeeze_result(lhs_expanded, rhs_expanded, lhs_expanded_matmul_fn(rhs_expanded))
for l in (lhs, lhs_expanded):
for r in (rhs, rhs_expanded):
l_matmul_fn = l.matmul
result = maybe_squeeze_result(l, r, l_matmul_fn(r))
self.assertEqual(truth, result)
# test torch.matmul function as well
torch_result = maybe_squeeze_result(l, r, torch.matmul(l, r))
self.assertEqual(truth, torch_result)
# test torch.matmul with out
out = torch.zeros_like(torch_result)
torch.matmul(l, r, out=out)
self.assertEqual(truth, maybe_squeeze_result(l, r, out))
# compare to bmm
bmm_result = (torch.bmm(lhs_expanded.contiguous().view(-1, *lhs_mat_dims),
rhs_expanded.contiguous().view(-1, *rhs_mat_dims)))
self.assertEqual(truth.view(-1, *result_dims), bmm_result.view(-1, *result_dims))
for indices in itertools.product((True, False), repeat=2):
verify_batched_matmul(*indices)
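    def _demo_matmul_batch_broadcast():
        # Minimal standalone sketch of the broadcasting rule checked above; the helper
        # name and shapes are arbitrary and only `import torch` is assumed. Batch
        # dimensions of torch.matmul broadcast, so an unbatched rhs behaves like the rhs
        # expanded to the full batch shape.
        lhs = torch.randn(2, 3, 4, 5, dtype=torch.float64)
        rhs = torch.randn(5, 6, dtype=torch.float64)
        expected = torch.matmul(lhs, rhs.expand(2, 3, 5, 6))
        assert torch.allclose(torch.matmul(lhs, rhs), expected)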
def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_A = partial(make_fullrank, device=device, dtype=dtype)
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = make_A(*A_dims)
LU_data, LU_pivots, info = torch.linalg.lu_factor_ex(A)
self.assertEqual(info, torch.zeros_like(info))
return b, A, LU_data, LU_pivots
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve(self, device, dtype):
def sub_test(pivot):
for k, n in zip([2, 3, 5], [3, 5, 7]):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper((n, n), (n, k), pivot, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
sub_test(True)
if self.device_type == 'cuda':
sub_test(False)
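    def _demo_lu_solve_roundtrip():
        # Minimal standalone sketch of the round trip checked above; the helper name,
        # shapes and the diagonal shift are arbitrary and only `import torch` is assumed.
        # Solving with the factors from linalg.lu_factor recovers x with A @ x == b.
        A = torch.randn(4, 4, dtype=torch.float64) + 4 * torch.eye(4, dtype=torch.float64)
        b = torch.randn(4, 2, dtype=torch.float64)
        LU, pivots = torch.linalg.lu_factor(A)
        x = torch.lu_solve(b, LU, pivots)
        assert torch.allclose(A @ x, b)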
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve_batched(self, device, dtype):
def sub_test(pivot):
def lu_solve_batch_test_helper(A_dims, b_dims, pivot):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, pivot, device, dtype)
x_exp_list = []
for i in range(b_dims[0]):
x_exp_list.append(torch.lu_solve(b[i], LU_data[i], LU_pivots[i]))
x_exp = torch.stack(x_exp_list) # Stacked output
x_act = torch.lu_solve(b, LU_data, LU_pivots) # Actual output
self.assertEqual(x_exp, x_act) # Equality check
Ax = np.matmul(A.cpu(), x_act.cpu())
self.assertEqual(b, Ax)
for batchsize in [1, 3, 4]:
lu_solve_batch_test_helper((batchsize, 5, 5), (batchsize, 5, 10), pivot)
# Tests tensors with 0 elements
b = torch.randn(3, 0, 3, dtype=dtype, device=device)
A = torch.randn(3, 0, 0, dtype=dtype, device=device)
LU_data, LU_pivots = torch.linalg.lu_factor(A)
self.assertEqual(torch.empty_like(b), b.lu_solve(LU_data, LU_pivots))
sub_test(True)
if self.device_type == 'cuda':
sub_test(False)
@skipCUDAIfRocm # ROCm: test was exceptionally slow, even for slow tests. Skip until triage.
@slowTest
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
def test_lu_solve_batched_many_batches(self, device, dtype):
def run_test(A_dims, b_dims):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
run_test((65536, 5, 5), (65536, 5, 10))
run_test((262144, 5, 5), (262144, 5, 10))
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
def test_lu_solve_batched_broadcasting(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_A = partial(make_fullrank, device=device, dtype=dtype)
def run_test(A_dims, b_dims, pivot=True):
A_matrix_size = A_dims[-1]
A_batch_dims = A_dims[:-2]
A = make_A(*A_batch_dims, A_matrix_size, A_matrix_size)
b = make_tensor(b_dims, dtype=dtype, device=device)
x_exp = np.linalg.solve(A.cpu(), b.cpu())
LU_data, LU_pivots = torch.linalg.lu_factor(A)
x = torch.lu_solve(b, LU_data, LU_pivots)
self.assertEqual(x, x_exp)
# test against numpy.linalg.solve
run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)) # no broadcasting
run_test((2, 1, 3, 4, 4), (4, 6)) # broadcasting b
run_test((4, 4), (2, 1, 3, 4, 2)) # broadcasting A
run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)) # broadcasting A & b
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
# this tests https://github.com/pytorch/pytorch/issues/36921
def test_lu_solve_large_matrices(self, device, dtype):
def run_test(A_dims, b_dims):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
run_test((1, 1), (1, 1, 1025))
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
def test_pca_lowrank(self, device):
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
dtype = torch.double
def run_subtest(guess_rank, actual_rank, matrix_size, batches, device, pca, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
u, s, v = pca(a_input, q=guess_rank, **options)
self.assertEqual(s.shape[-1], guess_rank)
self.assertEqual(u.shape[-2], rows)
self.assertEqual(u.shape[-1], guess_rank)
self.assertEqual(v.shape[-1], guess_rank)
self.assertEqual(v.shape[-2], columns)
A1 = u.matmul(s.diag_embed()).matmul(v.mT)
ones_m1 = torch.ones(batches + (rows, 1), dtype=a.dtype, device=device)
c = a.sum(axis=-2) / rows
c = c.reshape(batches + (1, columns))
A2 = a - ones_m1.matmul(c)
self.assertEqual(A1, A2)
if density == 1:
# actual rank is known only for dense input
detect_rank = (s.abs() > 1e-5).sum(axis=-1)
self.assertEqual(actual_rank * torch.ones(batches, device=device, dtype=torch.int64), detect_rank)
S = torch.linalg.svdvals(A2)
self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
(2, (17, 4), all_batches),
(2, (100, 4), all_batches),
(6, (100, 40), all_batches),
(12, (1000, 1000), [()]),
]:
for batches in all_batches:
for guess_rank in [
actual_rank,
actual_rank + 2,
actual_rank + 6,
]:
if guess_rank <= min(*size):
run_subtest(guess_rank, actual_rank, size, batches, device, torch.pca_lowrank)
run_subtest(guess_rank, actual_rank, size[::-1], batches, device, torch.pca_lowrank)
# sparse input
for guess_rank, size in [
(4, (17, 4)), (4, (4, 17)), (16, (17, 17)),
(21, (100, 40)), (20, (40, 100)), (600, (1000, 1000))]:
for density in [0.005, 0.1]:
run_subtest(guess_rank, None, size, (), device, torch.pca_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.pca_lowrank)
guess_rank, actual_rank, size, batches = 2, 2, (17, 4), ()
run_subtest(guess_rank, actual_rank, size, batches, device, jitted)
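    def _demo_pca_lowrank_rank_detection():
        # Minimal standalone sketch of the rank detection above; the helper name, sizes
        # and tolerance are arbitrary and only `import torch` is assumed. For an exactly
        # rank-2 input, the singular values returned by pca_lowrank are negligible beyond
        # the first two entries (centering cannot increase the rank).
        rank = 2
        a = torch.randn(20, rank, dtype=torch.float64) @ torch.randn(rank, 6, dtype=torch.float64)
        u, s, v = torch.pca_lowrank(a, q=4)
        assert s[rank:].abs().max() < 1e-6 * s[0]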
# Ensure that nuclear_norm's out variant gives the same result as the non-out
@onlyNativeDeviceTypes
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64)
def test_nuclear_norm_out(self, device, dtype):
test_cases = [
# input size, dim
((25, 25), None),
((25, 25), (0, 1)),
((25, 25), (1, 0)),
((25, 25, 25), (2, 0)),
((25, 25, 25), (0, 1)),
]
for keepdim in [False, True]:
for input_size, dim in test_cases:
msg = f'input_size: {input_size}, dim: {dim}, keepdim: {keepdim}'
x = torch.randn(*input_size, device=device, dtype=dtype)
result_out = torch.empty(0, device=device, dtype=dtype)
if dim is None:
result = torch.nuclear_norm(x, keepdim=keepdim)
torch.nuclear_norm(x, keepdim=keepdim, out=result_out)
else:
result = torch.nuclear_norm(x, keepdim=keepdim, dim=dim)
torch.nuclear_norm(x, keepdim=keepdim, dim=dim, out=result_out)
self.assertEqual(result, result_out, msg=msg)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_geqrf(self, device, dtype):
def run_test(shape):
# numpy.linalg.qr with mode = 'raw' computes the same operation as torch.geqrf
# so this test compares against that function
A = make_tensor(shape, dtype=dtype, device=device)
# numpy.linalg.qr doesn't work with batched input
m, n = A.shape[-2:]
tau_size = "n" if m > n else "m"
np_dtype = A.cpu().numpy().dtype
ot = [np_dtype, np_dtype]
numpy_geqrf_batched = np.vectorize(
lambda x: np.linalg.qr(x, mode='raw'),
otypes=ot,
signature=f'(m,n)->(n,m),({tau_size})')
expected = numpy_geqrf_batched(A.cpu())
actual = torch.geqrf(A)
# numpy.linalg.qr returns transposed result
self.assertEqual(expected[0].swapaxes(-2, -1), actual[0])
self.assertEqual(expected[1], actual[1])
batches = [(), (0, ), (2, ), (2, 1)]
ns = [5, 2, 0]
for batch, (m, n) in product(batches, product(ns, ns)):
run_test((*batch, m, n))
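    def _demo_geqrf_matches_numpy_raw():
        # Minimal standalone sketch mirroring the batched comparison above on a single
        # unbatched input; the helper name and shape are arbitrary, and the module-level
        # `import torch` / `import numpy as np` are assumed. numpy's qr(mode='raw')
        # returns the Householder representation transposed relative to torch.geqrf.
        A = torch.randn(5, 3, dtype=torch.float64)
        np_h, np_tau = np.linalg.qr(A.numpy(), mode='raw')
        t_h, t_tau = torch.geqrf(A)
        assert np.allclose(np_h.swapaxes(-2, -1), t_h.numpy())
        assert np.allclose(np_tau, t_tau.numpy())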
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_lapack_empty(self, device):
# FIXME: these are just a selection of LAPACK functions -- we need a general strategy here.
# The LAPACK functions themselves generally do NOT work with zero sized dimensions, although
        # numpy/scipy often has a direct wrapper (e.g. lu_factor) and a wrapper that "does the right thing"
# (e.g. lu). We often name our functions identically to the lapack function, so it will take work
# to name / migrate-to better wrappers.
def fn(torchfn, *args):
return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
for shape in args))
# inverse, pinverse
self.assertEqual((0, 0), fn(torch.inverse, (0, 0)).shape)
self.assertEqual((5, 0), fn(torch.pinverse, (0, 5)).shape)
self.assertEqual((0, 5), fn(torch.pinverse, (5, 0)).shape)
self.assertEqual((0, 0), fn(torch.pinverse, (0, 0)).shape)
# det, logdet, slogdet
self.assertEqual(torch.tensor(1., device=device), fn(torch.det, (0, 0)))
self.assertEqual(torch.tensor(0., device=device), fn(torch.logdet, (0, 0)))
self.assertEqual((torch.tensor(1., device=device), torch.tensor(0., device=device)),
fn(torch.slogdet, (0, 0)))
@tf32_on_and_off(0.005)
def test_tensordot(self, device):
a = torch.arange(60., device=device).reshape(3, 4, 5)
b = torch.arange(24., device=device).reshape(4, 3, 2)
c = torch.tensordot(a, b, dims=([1, 0], [0, 1])).cpu()
cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
axes=([1, 0], [0, 1])))
self.assertEqual(c, cn)
cout = torch.zeros((5, 2), device=device)
torch.tensordot(a, b, dims=([1, 0], [0, 1]), out=cout).cpu()
self.assertEqual(c, cout)
a = torch.randn(2, 3, 4, 5, device=device)
b = torch.randn(4, 5, 6, 7, device=device)
c = torch.tensordot(a, b, dims=2).cpu()
cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
axes=2))
with self.assertRaisesRegex(RuntimeError, "expects dims >= 0"):
torch.tensordot(a, b, dims=-1)
self.assertEqual(c, cn)
c = torch.tensordot(a, b).cpu()
cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy()))
self.assertEqual(c, cn)
a = torch.tensordot(torch.tensor(0.), torch.tensor(0.), 0)
an = torch.from_numpy(np.tensordot(np.zeros((), dtype=np.float32), np.zeros((), dtype=np.float32), 0))
self.assertEqual(a, an)
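    def _demo_tensordot_integer_dims():
        # Minimal standalone sketch of the dims=2 case above; the helper name and shapes
        # are arbitrary and only `import torch` is assumed. An integer `dims` contracts
        # the last `dims` dimensions of the first argument with the first `dims`
        # dimensions of the second.
        a = torch.randn(2, 3, 4, dtype=torch.float64)
        b = torch.randn(3, 4, 5, dtype=torch.float64)
        expected = torch.einsum('ijk,jkl->il', a, b)
        assert torch.allclose(torch.tensordot(a, b, dims=2), expected)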
@skipCUDAIfNoCusolver
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@skipCUDAIfRocm
@dtypes(*floating_and_complex_types())
def test_ldl_factor(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(shape, batch, hermitian):
A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
actual_factors, actual_pivots, info = torch.linalg.ldl_factor_ex(A, hermitian=hermitian)
actual_L = torch.tril(actual_factors, diagonal=-1)
actual_L.diagonal(0, -2, -1).fill_(1.0)
            # This test is designed only for inputs whose block diagonal matrix D has 1x1 blocks.
            # That is, for positive definite input matrices the pivots tensor is always > 0.
            # If negative pivots are encountered, the input matrix is not positive definite
            # and D contains 2x2 diagonal blocks.
self.assertTrue((actual_pivots > 0).all())
# Construct a 1x1 block diagonal matrix D from factors.
actual_D = torch.diag_embed(actual_factors.diagonal(0, -2, -1))
def T(x):
return x.mH if hermitian else x.mT
A_reconstructed = actual_L @ actual_D @ T(actual_L)
def symmetric(A):
return A.tril() + A.tril(-1).mT
self.assertEqual(symmetric(A) if not hermitian else A, A_reconstructed)
# Now test against SciPy implementation
if TEST_SCIPY:
from scipy.linalg import ldl as scipy_ldl
A_np = A.cpu().numpy()
np_dtype = A_np.dtype
scipy_ldl_batched = np.vectorize(
lambda x: scipy_ldl(x, hermitian=hermitian, lower=True),
otypes=[np_dtype, np_dtype, np.dtype('int64')],
signature='(m,m)->(m,m),(m,m),(m)')
expected = scipy_ldl_batched(A_np)
expected_L, expected_D, expected_pivots = expected
if expected_pivots.ndim > 1:
permuted_expected_L = np.stack(
[expected_L[i][expected_pivots[i], :] for i in range(expected_pivots.shape[0])]
)
else:
permuted_expected_L = expected_L[expected_pivots, :]
self.assertEqual(actual_L, permuted_expected_L)
self.assertEqual(actual_D, expected_D)
else:
self.assertEqual(actual_factors.shape, A.shape)
self.assertEqual(actual_pivots.shape, A.shape[:-1])
self.assertEqual(info.shape, A.shape[:-2])
# hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+
magma_254_available = self.device_type == 'cuda' and _get_magma_version() >= (2, 5, 4)
hermitians = (True, False) if dtype.is_complex and (self.device_type == 'cpu' or magma_254_available) else (False,)
shapes = (5,)
batches = ((), (4,),)
for shape, batch, hermitian in itertools.product(shapes, batches, hermitians):
run_test(shape, batch, hermitian)
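    def _demo_ldl_factor_reconstruction():
        # Minimal standalone sketch of the reconstruction checked above; the helper name
        # and the SPD matrix are arbitrary and only `import torch` is assumed. For a
        # symmetric positive-definite A the pivots are all positive, D is plain diagonal,
        # and A == L @ D @ L.mT with a unit-lower-triangular L.
        A = torch.randn(4, 4, dtype=torch.float64)
        A = A @ A.mT + 4 * torch.eye(4, dtype=torch.float64)
        factors, pivots, info = torch.linalg.ldl_factor_ex(A)
        L = torch.tril(factors, diagonal=-1) + torch.eye(4, dtype=torch.float64)
        D = torch.diag_embed(factors.diagonal(dim1=-2, dim2=-1))
        assert (pivots > 0).all()
        assert torch.allclose(L @ D @ L.mT, A)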
@skipCUDAIfNoCusolver
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@skipCUDAIfRocm
@skipCUDAIf(_get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1")
@dtypes(*floating_and_complex_types())
def test_ldl_solve(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(shape, batch, nrhs, hermitian):
A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
B = make_tensor((*A.shape[:-1], nrhs), dtype=dtype, device=device)
factors, pivots, info = torch.linalg.ldl_factor_ex(A, hermitian=hermitian)
X = torch.linalg.ldl_solve(factors, pivots, B, hermitian=hermitian)
def symmetric(A):
return A.tril() + A.tril(-1).mT
# verify A @ X == B
expected_B = symmetric(A) @ X if not hermitian else A @ X
self.assertEqual(B, expected_B)
# hermitian=True is not supported on CUDA yet
hermitians = (True, False) if dtype.is_complex and self.device_type == 'cpu' else (False,)
shapes = (5,)
batches = ((), (4,), (2, 2))
nrhss = (1, 7)
for shape, batch, nrhs, hermitian in itertools.product(shapes, batches, nrhss, hermitians):
run_test(shape, batch, nrhs, hermitian)
@onlyCUDA
@skipCUDAIfNoMagma
@skipCUDAIfNoCusolver
@setLinalgBackendsToDefaultFinally
def test_preferred_linalg_library(self):
# The main purpose of this test is to make sure these "backend" calls work normally without raising exceptions.
x = torch.randint(2, 5, (2, 4, 4), device='cuda', dtype=torch.double)
torch.backends.cuda.preferred_linalg_library('cusolver')
out1 = torch.linalg.inv(x)
torch.backends.cuda.preferred_linalg_library('magma')
out2 = torch.linalg.inv(x)
torch.backends.cuda.preferred_linalg_library('default')
        # Although the preferred linalg backend flag doesn't affect CPU currently,
        # we set it here to make sure the flag can switch back to default normally.
out_ref = torch.linalg.inv(x.cpu())
self.assertEqual(out_ref, out1.cpu())
self.assertEqual(out1, out2)
def test_permute_matmul(self):
a = torch.ones([2, 5, 24, 24])
b = torch.ones([3, 2, 5, 24, 24])
c = a.permute(0, 1, 3, 2).matmul(b)
self.assertEqual([c.min(), c.max(), c.sum()], [24, 24, 414720])
instantiate_device_type_tests(TestLinalg, globals())
if __name__ == '__main__':
run_tests()
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_linalg.py
|
_fn
|
def _fn(*args, **kwargs):
try:
fn(*args, **kwargs)
finally:
            # Set the linalg backend back to default to make sure potential failures in one test
            # don't affect other linalg tests
torch.backends.cuda.preferred_linalg_library('default')
return _fn
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_linalg.py
|
blaslt_supported_device
|
def blaslt_supported_device():
if torch.cuda.is_available():
if torch.version.hip:
for arch in ['gfx90a', 'gfx94']:
if arch in torch.cuda.get_device_properties(0).gcnArchName:
return True
else:
return True
return False
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
check_correctness_numpy
|
def check_correctness_numpy(a, b, res, driver, rcond):
# NumPy uses only gelsd routine
if driver == 'gelsd':
def numpy_ref(a, b):
return np.linalg.lstsq(a, b, rcond=rcond)
check_correctness_ref(a, b, res, numpy_ref)
version = torch.testing._internal.common_cuda._get_torch_cuda_version()
cusolver_available = (version >= (10, 2))
ms = [2 ** i for i in range(5)]
m_ge_n_sizes = [(m, m // 2) for m in ms] + [(m, m) for m in ms]
# cases m < n are only supported on CPU and for cuSOLVER path on CUDA
m_l_n_sizes = [(m // 2, m) for m in ms]
include_m_l_n_case = (cusolver_available or device == 'cpu')
matrix_sizes = m_ge_n_sizes + (m_l_n_sizes if include_m_l_n_case else [])
batches = [(), (2,), (2, 2), (2, 2, 2)]
        # we generate matrices with singular values sampled from a normal distribution,
        # which is why we use `cond=1.0` (the mean) to cut roughly half of all
        # the singular values, and then compare whether torch.linalg.lstsq agrees with
        # SciPy and NumPy.
        # if rcond is True, a driver-specific value is substituted for it below
        # rcond == -1 or any other negative value forces LAPACK to use machine precision tolerance
rconds = (None, True, -1)
for batch, matrix_size, driver, rcond in itertools.product(batches, matrix_sizes, drivers, rconds):
# keep the rcond value if it is None or -1, set the driver specific value if it is True
if rcond and rcond != -1:
if driver in ('gelss', 'gelsd'):
# SVD based algorithm; set to zero roughly half of all the singular values
rcond = 1.0
else:
# driver == 'gelsy'
# QR based algorithm; setting the value too high might lead to non-unique solutions and flaky tests
# so we skip this case
continue
# specifying rcond value has no effect for gels driver so no need to run the tests again
if driver == 'gels' and rcond is not None:
continue
shape = batch + matrix_size
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
b = torch.rand(*shape, dtype=dtype, device=device)
m = a.size(-2)
n = a.size(-1)
res = torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
sol = res.solution
# Only checks gelsd, gelss, gelsy drivers
check_correctness_scipy(a, b, res, driver, rcond)
# Only checks gelsd driver
check_correctness_numpy(a, b, res, driver, rcond)
# gels driver is not checked by comparing to NumPy or SciPy implementation
# because NumPy and SciPy do not implement this driver
if driver == 'gels' and rcond is None:
check_solution_correctness(a, b, sol)
|
def check_correctness_numpy(a, b, res, driver, rcond):
# NumPy uses only gelsd routine
if driver == 'gelsd':
def numpy_ref(a, b):
return np.linalg.lstsq(a, b, rcond=rcond)
check_correctness_ref(a, b, res, numpy_ref)
ms = [2 ** i for i in range(5)]
m_ge_n_sizes = [(m, m // 2) for m in ms] + [(m, m) for m in ms]
# cases m < n are only supported on CPU and for cuSOLVER path on CUDA
m_l_n_sizes = [(m // 2, m) for m in ms]
include_m_l_n_case = (has_cusolver() or device == 'cpu')
matrix_sizes = m_ge_n_sizes + (m_l_n_sizes if include_m_l_n_case else [])
batches = [(), (2,), (2, 2), (2, 2, 2)]
        # we generate matrices with singular values sampled from a normal distribution,
        # which is why we use `cond=1.0` (the mean) to cut roughly half of all
        # the singular values, and then compare whether torch.linalg.lstsq agrees with
        # SciPy and NumPy.
        # if rcond is True, a driver-specific value is substituted for it below
        # rcond == -1 or any other negative value forces LAPACK to use machine precision tolerance
rconds = (None, True, -1)
for batch, matrix_size, driver, rcond in itertools.product(batches, matrix_sizes, drivers, rconds):
# keep the rcond value if it is None or -1, set the driver specific value if it is True
if rcond and rcond != -1:
if driver in ('gelss', 'gelsd'):
# SVD based algorithm; set to zero roughly half of all the singular values
rcond = 1.0
else:
# driver == 'gelsy'
# QR based algorithm; setting the value too high might lead to non-unique solutions and flaky tests
# so we skip this case
continue
# specifying rcond value has no effect for gels driver so no need to run the tests again
if driver == 'gels' and rcond is not None:
continue
shape = batch + matrix_size
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
b = torch.rand(*shape, dtype=dtype, device=device)
m = a.size(-2)
n = a.size(-1)
res = torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
sol = res.solution
# Only checks gelsd, gelss, gelsy drivers
check_correctness_scipy(a, b, res, driver, rcond)
# Only checks gelsd driver
check_correctness_numpy(a, b, res, driver, rcond)
# gels driver is not checked by comparing to NumPy or SciPy implementation
# because NumPy and SciPy do not implement this driver
if driver == 'gels' and rcond is None:
check_solution_correctness(a, b, sol)
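def _demo_lstsq_gelsd_matches_numpy():
    # Minimal standalone sketch of the NumPy comparison above; the helper name and shapes
    # are arbitrary, and the module-level `import torch` / `import numpy as np` are
    # assumed. For an overdetermined CPU system the 'gelsd' driver agrees with
    # numpy.linalg.lstsq.
    a = torch.randn(6, 3, dtype=torch.float64)
    b = torch.randn(6, 2, dtype=torch.float64)
    sol = torch.linalg.lstsq(a, b, driver='gelsd').solution
    ref = np.linalg.lstsq(a.numpy(), b.numpy(), rcond=None)[0]
    assert np.allclose(sol.numpy(), ref)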
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_linalg.py
|
test_vector_norm
|
def test_vector_norm(self, device, dtype):
# This test compares torch.linalg.vector_norm's output with
# torch.linalg.norm given a flattened tensor
ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
input_sizes = [
(10, ),
(4, 5),
(3, 4, 5),
(0, ),
(0, 10),
(0, 0),
(10, 0, 10),
]
def vector_norm_reference(input, ord, dim=None, keepdim=False, dtype=None):
if dim is None:
input_maybe_flat = input.flatten(0, -1)
else:
input_maybe_flat = input
result = torch.linalg.norm(input_maybe_flat, ord, dim=dim, keepdim=keepdim, dtype=dtype)
if keepdim and dim is None:
result = result.reshape([1] * input.dim())
return result
def run_test_case(input, ord, dim, keepdim, norm_dtype):
if (input.numel() == 0 and
(ord < 0. or ord == inf) and
(dim is None or input.shape[dim] == 0)):
# The operation does not have an identity.
error_msg = "linalg.vector_norm cannot compute"
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim)
else:
msg = (f'input.size()={input.size()}, ord={ord}, dim={dim}, '
f'keepdim={keepdim}, dtype={dtype}, norm_dtype={norm_dtype}')
result_dtype_reference = vector_norm_reference(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
result_dtype = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
if dtype.is_complex:
result_dtype_reference = result_dtype_reference.real
self.assertEqual(result_dtype, result_dtype_reference, msg=msg)
if norm_dtype is not None:
ref = torch.linalg.vector_norm(input.to(norm_dtype), ord, dim=dim, keepdim=keepdim)
actual = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
self.assertEqual(ref, actual, msg=msg)
if dtype == torch.cfloat:
norm_dtypes = (None, torch.cfloat, torch.cdouble)
elif dtype == torch.cdouble:
norm_dtypes = (None, torch.cdouble)
elif dtype in (torch.float16, torch.bfloat16, torch.float):
norm_dtypes = (None, torch.float, torch.double)
elif dtype == torch.double:
norm_dtypes = (None, torch.double)
else:
raise RuntimeError("Unsupported dtype")
for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
input = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
for dim in [None, random.randint(0, len(input_size) - 1)]:
run_test_case(
input,
ord,
dim,
keepdim,
norm_dtype)
|
def test_vector_norm(self, device, dtype):
if IS_ARM64 and device == 'cpu' and dtype in [torch.float16, torch.bfloat16, torch.float32]:
raise unittest.SkipTest("Fails on ARM, see https://github.com/pytorch/pytorch/issues/125438")
# have to use torch.randn(...).to(bfloat16) instead of
# This test compares torch.linalg.vector_norm's output with
# torch.linalg.norm given a flattened tensor
ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
input_sizes = [
(1, ),
(10, ),
(4, 5),
(3, 4, 5),
(0, ),
(0, 10),
(0, 0),
(10, 0, 10),
]
def vector_norm_reference(input, ord, dim=None, keepdim=False, dtype=None):
if dim is None:
input_maybe_flat = input.flatten(0, -1)
else:
input_maybe_flat = input
result = torch.linalg.norm(input_maybe_flat, ord, dim=dim, keepdim=keepdim, dtype=dtype)
if keepdim and dim is None:
result = result.reshape([1] * input.dim())
return result
def run_test_case(input, ord, dim, keepdim, norm_dtype):
if (input.numel() == 0 and
(ord < 0. or ord == inf) and
(dim is None or input.shape[dim] == 0)):
# The operation does not have an identity.
error_msg = "linalg.vector_norm cannot compute"
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim)
else:
msg = (f'input.size()={input.size()}, ord={ord}, dim={dim}, '
f'keepdim={keepdim}, dtype={dtype}, norm_dtype={norm_dtype}')
result_dtype_reference = vector_norm_reference(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
result_dtype = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
if dtype.is_complex:
result_dtype_reference = result_dtype_reference.real
self.assertEqual(result_dtype, result_dtype_reference, msg=msg)
if norm_dtype is not None:
ref = torch.linalg.vector_norm(input.to(norm_dtype), ord, dim=dim, keepdim=keepdim)
actual = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
self.assertEqual(ref, actual, msg=msg)
if dtype == torch.cfloat:
norm_dtypes = (None, torch.cfloat, torch.cdouble)
elif dtype == torch.cdouble:
norm_dtypes = (None, torch.cdouble)
elif dtype in (torch.float16, torch.bfloat16, torch.float):
norm_dtypes = (None, torch.float, torch.double)
elif dtype == torch.double:
norm_dtypes = (None, torch.double)
else:
raise RuntimeError("Unsupported dtype")
for amp in [False, True]:
with torch.autocast(device_type=device, enabled=amp):
for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
input = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
for dim in [None, random.randint(0, len(input_size) - 1)]:
run_test_case(
input,
ord,
dim,
keepdim,
norm_dtype)
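def _demo_vector_norm_flatten_equivalence():
    # Minimal standalone sketch of the reference used above; the helper name, shape and
    # ord values are arbitrary and only `import torch` is assumed. With dim=None,
    # linalg.vector_norm matches linalg.norm of the flattened input.
    x = torch.randn(3, 4, 5, dtype=torch.float64)
    for p in (0, 0.9, 1, 2, 3, float('inf'), -1, -2, float('-inf')):
        expected = torch.linalg.norm(x.flatten(), p)
        assert torch.allclose(torch.linalg.vector_norm(x, p), expected)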
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
@unittest.skipIf(IS_ARM64, "Issue with numpy version on arm")
class TestLinalg(TestCase):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
class TestLinalg(TestCase):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_linalg.py
|
__init__
|
def __init__(self, test, norm):
self.norm = norm
self.saved_overrides = getattr(test, 'precision_overrides', None)
self.target_test = test
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
class PrecisionContext:
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
__enter__
|
for keepdim in [False, True]:
# full reduction
x = torch.randn(25, device=device)
xn = x.cpu().numpy()
for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3, 1.5]:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
self.assertEqual(res, expected, atol=1e-5, rtol=0, msg=gen_error_message(x.size(), p, keepdim))
# one dimension
x = torch.randn(25, 25, device=device)
xn = x.cpu().numpy()
for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3]:
dim = 1
res = x.norm(p, dim, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, dim, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim, dim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# matrix norm
for p in ['fro', 'nuc']:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# zero dimensions
x = torch.randn((), device=device)
xn = x.cpu().numpy()
res = x.norm(keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, keepdims=keepdim)
msg = gen_error_message(x.size(), None, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# larger tensor sanity check
self.assertEqual(
2 * torch.norm(torch.ones(10000), keepdim=keepdim),
torch.norm(torch.ones(40000), keepdim=keepdim))
# matrix norm with non-square >2-D tensors, all combinations of reduction dims
x = torch.randn(5, 6, 7, 8, device=device)
xn = x.cpu().numpy()
for p in ['fro', 'nuc']:
for dim in itertools.product(*[list(range(4))] * 2):
if dim[0] == dim[1]:
continue
res = x.norm(p=p, dim=dim, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, ord=p, axis=dim, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim, dim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# Test that torch.norm with p=+/-inf propagates NaN
|
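The norm checks above compare torch.norm against np.linalg.norm across vector, matrix, and multi-dimensional reductions. A minimal standalone sketch of the same cross-check follows; the sizes, p values, and tolerances here are illustrative choices, not the test's full grid.

import numpy as np
import torch

# Cross-check torch.norm against np.linalg.norm for a 2-D input,
# mirroring the vector-style and matrix-norm cases exercised above.
x = torch.randn(25, 25)
xn = x.numpy()

# reduction over dim=1 for a few vector-norm orders
for p in (1, 2, float("inf")):
    res = x.norm(p, dim=1, keepdim=True)
    expected = torch.from_numpy(np.linalg.norm(xn, p, axis=1, keepdims=True)).to(res.dtype)
    torch.testing.assert_close(res, expected, rtol=1e-5, atol=1e-5)

# full matrix norms over the whole tensor
for p in ("fro", "nuc"):
    res = x.norm(p).item()
    expected = float(np.linalg.norm(xn, p))
    torch.testing.assert_close(res, expected, rtol=1e-4, atol=1e-4)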
def __enter__(self):
if 'nuc' != self.norm:
return None
self.target_test.precision_overrides = {torch.float: 1e-4, torch.cfloat: 2e-4}
return self.target_test.precision_overrides
|
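Read together, the __init__ and __enter__ fragments above form a small context manager that temporarily installs per-dtype precision overrides while a 'nuc'-norm result is being checked. The __exit__ step is not part of this excerpt; the sketch below assembles the two pieces and adds a hypothetical __exit__ that restores (or removes) the saved attribute, purely as an illustration of how such a manager could be completed.

import torch

class PrecisionContext:
    # Temporarily raise float/cfloat tolerances around a 'nuc' norm check,
    # then put back whatever the test case had before.
    def __init__(self, test, norm):
        self.norm = norm
        self.saved_overrides = getattr(test, 'precision_overrides', None)
        self.target_test = test

    def __enter__(self):
        if 'nuc' != self.norm:
            return None
        self.target_test.precision_overrides = {torch.float: 1e-4, torch.cfloat: 2e-4}
        return self.target_test.precision_overrides

    def __exit__(self, exc_type, exc_value, traceback):
        # Hypothetical restore step (not shown in the excerpt above).
        if self.saved_overrides is None:
            if hasattr(self.target_test, 'precision_overrides'):
                del self.target_test.precision_overrides
        else:
            self.target_test.precision_overrides = self.saved_overrides
        return False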
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
class PrecisionContext:
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_linalg.py
|
run_subtest
|
def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
assert batches == ()
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
q = min(*size)
u, s, v = svd_lowrank(a_input, q=q, **options)
# check if u, s, v is a SVD
u, s, v = u[..., :q], s[..., :q], v[..., :q]
A = u.matmul(s.diag_embed()).matmul(v.mT)
self.assertEqual(A, a, rtol=1e-7, atol=2e-7)
# check if svd_lowrank produces same singular values as torch.svd
U, S, V = torch.svd(a)
self.assertEqual(s.shape, S.shape)
self.assertEqual(u.shape, U.shape)
self.assertEqual(v.shape, V.shape)
self.assertEqual(s, S)
if density == 1:
# actual_rank is known only for dense inputs
#
# check if pairs (u, U) and (v, V) span the same
# subspaces, respectively
u, s, v = u[..., :actual_rank], s[..., :actual_rank], v[..., :actual_rank]
U, S, V = U[..., :actual_rank], S[..., :actual_rank], V[..., :actual_rank]
self.assertEqual(u.mT.matmul(U).det().abs(), torch.ones(batches, device=device, dtype=dtype))
self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
(10, (100, 40), all_batches),
(7, (1000, 1000), [()]),
]:
# dense input
for batches in all_batches:
run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
if size != size[::-1]:
run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)
# sparse input
for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
for density in [0.005, 0.1]:
run_subtest(None, size, (), device, torch.svd_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.svd_lowrank)
actual_rank, size, batches = 2, (17, 4), ()
run_subtest(actual_rank, size, batches, device, jitted)
|
def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
assert batches == ()
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
q = min(*size)
u, s, v = svd_lowrank(a_input, q=q, **options)
# check if u, s, v is a SVD
u, s, v = u[..., :q], s[..., :q], v[..., :q]
A = (u * s.unsqueeze(-2)).matmul(v.mH)
self.assertEqual(A, a, rtol=1e-7, atol=2e-7)
# check if svd_lowrank produces same singular values as linalg.svdvals
U, S, Vh = torch.linalg.svd(a, full_matrices=False)
V = Vh.mH
self.assertEqual(s, S)
if density == 1:
# actual_rank is known only for dense inputs
#
# check if pairs (u, U) and (v, V) span the same
# subspaces, respectively
u, v = u[..., :actual_rank], v[..., :actual_rank]
U, V = U[..., :actual_rank], V[..., :actual_rank]
expected_ones = u.mH.matmul(U).det().abs()
self.assertEqual(expected_ones, torch.ones_like(expected_ones))
self.assertEqual(v.mH.matmul(V).det().abs(), torch.ones_like(expected_ones))
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [ # noqa: B020
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
(10, (100, 40), all_batches),
(7, (1000, 1000), [()]),
]:
# dense input
for batches in all_batches:
run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
if size != size[::-1]:
run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)
# sparse input
for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
for density in [0.005, 0.1]:
run_subtest(None, size, (), device, torch.svd_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.svd_lowrank)
actual_rank, size, batches = 2, (17, 4), ()
run_subtest(actual_rank, size, batches, device, jitted)
|
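The updated run_subtest reconstructs its input from the torch.svd_lowrank factors as (u * s.unsqueeze(-2)) @ v.mH and checks the singular values against the reference decomposition. A self-contained version of that check on one dense low-rank matrix is sketched below; the rank, shape, and seed are arbitrary illustrative choices.

import torch

# Factor a rank-4 matrix with svd_lowrank, then verify the reconstruction
# and the singular values against torch.linalg.svdvals.
torch.manual_seed(0)
rank, rows, cols = 4, 17, 9
a = torch.randn(rows, rank, dtype=torch.float64) @ torch.randn(rank, cols, dtype=torch.float64)

q = min(rows, cols)
u, s, v = torch.svd_lowrank(a, q=q)

# Scale the columns of u by s instead of materializing diag_embed(s).
reconstruction = (u * s.unsqueeze(-2)) @ v.mH
torch.testing.assert_close(reconstruction, a, rtol=1e-7, atol=2e-7)

torch.testing.assert_close(s, torch.linalg.svdvals(a), rtol=1e-7, atol=2e-7)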
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_linalg.py
|
_gen_shape_inputs_linalg_triangular_solve
|
def _gen_shape_inputs_linalg_triangular_solve(self, shape, dtype, device, well_conditioned=False):
make_arg = partial(make_tensor, dtype=dtype, device=device)
make_randn = partial(torch.randn, dtype=dtype, device=device)
b, n, k = shape
for left, uni, expand_a, tr_a, conj_a, expand_b, tr_b, conj_b in product((True, False), repeat=8):
# expand means that we generate a batch of matrices with a stride of zero in the batch dimension
if (conj_a or conj_b) and not dtype.is_complex:
continue
# We just expand on the batch size
if (expand_a or expand_b) and b == 1:
continue
size_a = (b, n, n) if left else (b, k, k)
size_b = (b, n, k) if not tr_b else (b, k, n)
# If expand_a or expand_b, we'll expand them to the correct size later
if b == 1 or expand_a:
size_a = size_a[1:]
if b == 1 or expand_b:
size_b = size_b[1:]
if well_conditioned:
PLU = torch.linalg.lu(make_randn(*size_a))
if uni:
# A = L from PLU
A = PLU[1].transpose(-2, -1).contiguous()
else:
# A = U from PLU
A = PLU[2].contiguous()
else:
A = make_arg(size_a)
A.triu_()
diag = A.diagonal(0, -2, -1)
if uni:
diag.fill_(1.)
else:
diag[diag.abs() < 1e-6] = 1.
B = make_arg(size_b)
if tr_a:
A.transpose_(-2, -1)
if tr_b:
B.transpose_(-2, -1)
if conj_a:
A = A.conj()
if conj_b:
B = B.conj()
if expand_a:
A = A.expand(b, *size_a)
if expand_b:
B = B.expand(b, n, k)
yield A, B, left, not tr_a, uni
|
def _gen_shape_inputs_linalg_triangular_solve(self, shape, dtype, device, well_conditioned=False):
make_arg = partial(make_tensor, dtype=dtype, device=device)
make_fullrank = partial(make_fullrank_matrices_with_distinct_singular_values, dtype=dtype, device=device)
b, n, k = shape
for left, uni, expand_a, tr_a, conj_a, expand_b, tr_b, conj_b in product((True, False), repeat=8):
# expand means that we generate a batch of matrices with a stride of zero in the batch dimension
if (conj_a or conj_b) and not dtype.is_complex:
continue
# We just expand on the batch size
if (expand_a or expand_b) and b == 1:
continue
size_a = (b, n, n) if left else (b, k, k)
size_b = (b, n, k) if not tr_b else (b, k, n)
# If expand_a or expand_b, we'll expand them to the correct size later
if b == 1 or expand_a:
size_a = size_a[1:]
if b == 1 or expand_b:
size_b = size_b[1:]
if well_conditioned:
PLU = torch.linalg.lu(make_fullrank(*size_a))
if uni:
# A = L from PLU
A = PLU[1].transpose(-2, -1).contiguous()
else:
# A = U from PLU
A = PLU[2].contiguous()
else:
A = make_arg(size_a)
A.triu_()
diag = A.diagonal(0, -2, -1)
if uni:
diag.fill_(1.)
else:
diag[diag.abs() < 1e-6] = 1.
B = make_arg(size_b)
if tr_a:
A.transpose_(-2, -1)
if tr_b:
B.transpose_(-2, -1)
if conj_a:
A = A.conj()
if conj_b:
B = B.conj()
if expand_a:
A = A.expand(b, *size_a)
if expand_b:
B = B.expand(b, n, k)
yield A, B, left, not tr_a, uni
|
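The generator above yields (A, B, left, upper, unitriangular) combinations for torch.linalg.solve_triangular. A minimal consumer of one such case, verified by substituting the solution back, is sketched below; the single upper-triangular, left-sided, well-conditioned system is an illustrative choice rather than the test's full grid.

import torch

torch.manual_seed(0)
n, k = 5, 3
A = torch.randn(n, n, dtype=torch.float64).triu()
A.diagonal().add_(n)          # diagonally dominant, so the system stays well conditioned
B = torch.randn(n, k, dtype=torch.float64)

X = torch.linalg.solve_triangular(A, B, upper=True, left=True, unitriangular=False)
torch.testing.assert_close(A @ X, B)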
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
@unittest.skipIf(IS_ARM64, "Issue with numpy version on arm")
class TestLinalg(TestCase):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
class TestLinalg(TestCase):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_linalg.py
|
test_lobpcg_ortho
|
def test_lobpcg_ortho(self, device, dtype):
self._test_lobpcg_method(device, dtype, 'ortho')
|
def test_lobpcg_ortho(self, device, dtype):
if torch.version.hip:
torch.backends.cuda.preferred_linalg_library('magma')
self._test_lobpcg_method(device, dtype, 'ortho')
if torch.version.hip:
torch.backends.cuda.preferred_linalg_library('default')
|
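test_lobpcg_ortho drives torch.lobpcg with method='ortho' (switching ROCm builds to the MAGMA backend around the call). A standalone sketch of the same solver on a small symmetric positive-definite matrix, checked against torch.linalg.eigvalsh, is shown below; the size, k, and tolerances are illustrative.

import torch

torch.manual_seed(0)
n, k = 20, 3
m = torch.randn(n, n, dtype=torch.float64)
A = m @ m.mT + n * torch.eye(n, dtype=torch.float64)   # symmetric positive definite

# k largest eigenpairs via the 'ortho' LOBPCG iteration.
evals, evecs = torch.lobpcg(A, k=k, method='ortho', largest=True)

ref = torch.linalg.eigvalsh(A)                          # ascending order
torch.testing.assert_close(evals.sort().values, ref[-k:], rtol=1e-6, atol=1e-6)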
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
@unittest.skipIf(IS_ARM64, "Issue with numpy version on arm")
class TestLinalg(TestCase):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
class TestLinalg(TestCase):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_linalg.py
|
genf_int_float
|
def genf_int_float(x, y, use_transpose):
if use_transpose:
x, y = y, x
x_int8 = torch.randint(-10, 10, (x, y), dtype=torch.int8, device=device)
x_float = x_int8.to(torch.float32)
if use_transpose:
return x_int8.t(), x_float.t()
return x_int8, x_float
|
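genf_int_float pairs an int8 matrix with its float32 copy (optionally as transposed views) so an integer matmul path can be compared against a float reference. A minimal CPU-only sketch of that comparison, using int64 accumulation as the integer reference, follows; the shapes and value range are illustrative.

import torch

torch.manual_seed(0)
x_int8 = torch.randint(-10, 10, (8, 16), dtype=torch.int8)
y_int8 = torch.randint(-10, 10, (16, 4), dtype=torch.int8)
x_float, y_float = x_int8.to(torch.float32), y_int8.to(torch.float32)

# With |values| <= 10 both accumulations are exact, so the results match bit-for-bit.
out_int = (x_int8.long() @ y_int8.long()).to(torch.float32)
out_float = x_float @ y_float
torch.testing.assert_close(out_int, out_float)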
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
maybe_transpose
|
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
self._test_addmm_addmv(func, M, m1, m2, transpose_out=t4, activation=activation)
|
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
self._test_addmm_addmv(func, M, m1, m2, transpose_out=t4, activation=activation)
if t1:
# use vector V instead of matrix M for epilogue fusion in CUDA (doesn't depend on t1)
self._test_addmm_addmv(func, V, m1, m2, beta=1, transpose_out=t4, activation=activation,)
|
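A minimal illustration of what the maybe_transpose helper above produces (the 10x25 shape is taken from the snippet; the standalone setup is an assumption): the values and shape are unchanged and only the memory layout flips to column-major, so the same addmm/addmv call is exercised on both layouts.

import torch  # illustrative sketch, not part of the dataset record

m = torch.randn(10, 25)
mt = m.t().clone(memory_format=torch.contiguous_format).t()  # what maybe_transpose(True, m) returns
assert torch.equal(m, mt)                                    # same values and shape
assert m.is_contiguous() and not mt.is_contiguous()          # but a transposed (column-major) layout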
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_linalg.py
|
genf_int
|
def genf_int(x, y):
return torch.randint(0, 100, (x, y), dtype=dtype, device=device)
|
def genf_int(x, y):
return torch.empty((x, y), dtype=torch.int8, device=device)
|
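A short sketch of the difference between the old and new helper (the standalone setup is an assumption): the revised genf_int only allocates an int8 buffer of the right shape, presumably because the error-path assertions it feeds check shapes and dtypes rather than values, whereas the old version also drew random values in the test's parametrized dtype.

import torch  # illustrative sketch

a = torch.empty((17, 8), dtype=torch.int8)            # new helper: shape and dtype only, values unspecified
b = torch.randint(0, 100, (17, 8), dtype=torch.int8)  # old behaviour, shown with int8 here for comparison
assert a.shape == b.shape and a.dtype == b.dtype == torch.int8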
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_linalg.py
|
_gen_pair
|
def _gen_pair(m, k, n):
return genf_int(m, k), genf_int(k, n)
self.assertRaisesRegex(RuntimeError,
r"self.size\(0\) needs to be greater than 16, but got 16",
lambda: torch._int_mm(*_gen_pair(16, 8, 32)))
self.assertRaisesRegex(RuntimeError,
r"self.size\(1\) needs to be greater than 0 and a multiple of 8, but got 7",
lambda: torch._int_mm(*_gen_pair(17, 7, 32)))
self.assertRaisesRegex(RuntimeError,
r"self.size\(1\) needs to match mat2.size\(0\) but got 8 and 7",
lambda: torch._int_mm(genf_int(17, 8), genf_int(7, 32)))
self.assertRaisesRegex(RuntimeError,
r"mat2.size\(1\) needs to be greater than 0 and a multiple of 8, but got 31",
lambda: torch._int_mm(*_gen_pair(17, 8, 31)))
self.assertRaisesRegex(RuntimeError,
r"expected scalar type Char but found Float",
lambda: torch._int_mm(genf_int(17, 8).float(), genf_int(8, 32)))
self.assertRaisesRegex(RuntimeError,
r"expected scalar type Char but found Float",
lambda: torch._int_mm(genf_int(17, 8), genf_int(8, 32).float()))
self.assertRaisesRegex(RuntimeError,
r"Expected result dtype to be of type kInt but got float",
lambda: torch._int_mm(genf_int(17, 8), genf_int(8, 32), out=genf_int(16, 32).float()))
self.assertRaisesRegex(RuntimeError,
r"Expected result.size\(0\) to be 17 but got 15",
lambda: torch._int_mm(genf_int(17, 8), genf_int(8, 32), out=genf_int(15, 32).int()))
self.assertRaisesRegex(RuntimeError,
r"Expected result.size\(0\) to be 17 but got 16",
lambda: torch._int_mm(genf_int(17, 8), genf_int(8, 32), out=genf_int(16, 31).int()))
|
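The assertions above probe the operand checks of torch._int_mm; restated as a sketch of a well-formed pair (the concrete sizes come from the error messages, the standalone setup is an assumption): int8 operands, self.size(0) strictly greater than 16, and the shared and output dimensions multiples of 8.

import torch  # illustrative sketch

a = torch.randint(-10, 10, (17, 8), dtype=torch.int8)
b = torch.randint(-10, 10, (8, 32), dtype=torch.int8)
assert a.dtype == b.dtype == torch.int8
assert a.size(0) > 16 and a.size(1) % 8 == 0 and b.size(1) % 8 == 0
# On a device/build where it is supported, torch._int_mm(a, b) returns an int32 tensor of shape (17, 32).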
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
genf_int_float
|
def genf_int_float(x, y, use_transpose):
if use_transpose:
x, y = y, x
x_int8 = torch.randint(-10, 10, (x, y), dtype=torch.int8, device=device)
x_float = x_int8.to(torch.float32)
if use_transpose:
return x_int8.t(), x_float.t()
return x_int8, x_float
|
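A small sketch of what the helper above returns (the standalone setup is an assumption): the same values twice, once as int8 and once as float32, with the use_transpose branch yielding a non-contiguous, column-major pair.

import torch  # illustrative sketch

x_int8 = torch.randint(-10, 10, (8, 17), dtype=torch.int8)
x_float = x_int8.to(torch.float32)
x_int8_t, x_float_t = x_int8.t(), x_float.t()          # the use_transpose=True branch
assert x_int8_t.shape == (17, 8) and not x_int8_t.is_contiguous()
assert torch.equal(x_int8_t.to(torch.float32), x_float_t)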
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
convert_weight_to_int4pack
|
def convert_weight_to_int4pack(b):
b_uint8, b_scales_and_zeros = _group_quantize_tensor(
b, n_bit=4, q_group_size=q_group
)
b_int4pack = torch._convert_weight_to_int4pack(
b_uint8, inner_k_tiles
)
return b_int4pack, b_scales_and_zeros
|
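The helper above delegates to _group_quantize_tensor and torch._convert_weight_to_int4pack; below is a simplified, self-contained sketch of the group-quantization idea only (it is not the internal helper, and the packing layout of the real op is not reproduced): each run of q_group consecutive values shares one scale and one offset, so the reconstruction error stays within half a quantization step.

import torch  # illustrative sketch

def group_quantize_rowwise(w, q_group=32, n_bit=4):
    n, k = w.shape                                   # assumes k is a multiple of q_group
    groups = w.reshape(n, k // q_group, q_group)
    w_min = groups.amin(dim=-1, keepdim=True)
    w_max = groups.amax(dim=-1, keepdim=True)
    scale = (w_max - w_min).clamp(min=1e-6) / (2 ** n_bit - 1)
    q = ((groups - w_min) / scale).round().clamp(0, 2 ** n_bit - 1).to(torch.uint8)
    return q.reshape(n, k), scale.squeeze(-1), w_min.squeeze(-1)

w = torch.randn(32, 64)
q, scale, offset = group_quantize_rowwise(w)
w_hat = (q.reshape(32, 2, 32).to(w.dtype) * scale.unsqueeze(-1) + offset.unsqueeze(-1)).reshape(32, 64)
assert (w - w_hat).abs().max() < scale.max()         # at most half a step per element (checked loosely)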
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
weight_int4pack_mm
|
def weight_int4pack_mm(a, b_int4pack, b_scales_and_zeros):
return torch._weight_int4pack_mm(
a, b_int4pack, q_group, b_scales_and_zeros
)
b_int4pack, b_scales_and_zeros_bf16 = convert_weight_to_int4pack(b_bf16)
for dtype in [torch.bfloat16] + ([torch.float16, torch.float32] if device == "cpu" else []):
a = a_bf16.to(dtype=dtype)
b = b_bf16.to(dtype=dtype)
b_scales_and_zeros = b_scales_and_zeros_bf16.to(dtype=dtype)
ref = torch.mm(a, b)
res = weight_int4pack_mm(a, b_int4pack, b_scales_and_zeros)
mean_err = ((res - ref).abs() / ref).mean()
self.assertTrue(mean_err < 0.05)
|
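A short restatement of the dtype coverage in the loop above (the helper name is an assumption): the CPU path is exercised with bfloat16, float16 and float32 activations, while other devices only run the bfloat16 path.

import torch  # illustrative sketch

def dtypes_to_test(device):
    return [torch.bfloat16] + ([torch.float16, torch.float32] if device == "cpu" else [])

assert dtypes_to_test("cpu") == [torch.bfloat16, torch.float16, torch.float32]
assert dtypes_to_test("cuda") == [torch.bfloat16]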
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
int4_mm
|
def int4_mm(a, b_int32, b_scales_and_zeros):
b_int4pack = torch._convert_weight_to_int4pack(
b_int32, inner_k_tiles
)
return torch._weight_int4pack_mm(
a, b_int4pack, q_group, b_scales_and_zeros
)
res = int4_mm(a, b_int32, b_scales_and_zeros)
ref = torch.mm(a, b)
mean_err = ((res - ref).abs() / ref).mean()
self.assertTrue(mean_err < 0.05)
|
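A sketch of the accuracy criterion used above, on synthetic data (the 1% noise level is an assumption, not a statement about the real kernel): because 4-bit weight quantization is lossy, the test bounds the mean elementwise relative error at 5% instead of requiring exact equality, and a positive reference keeps the ratio well defined.

import torch  # illustrative sketch

ref = torch.rand(32, 48) + 1.0                       # positive reference, bounded away from zero
res = ref * (1.0 + 0.01 * torch.randn_like(ref))     # a result carrying roughly 1% multiplicative noise
mean_err = ((res - ref).abs() / ref).mean()
assert mean_err < 0.05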
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
convert_weight_to_int8pack
|
def convert_weight_to_int8pack(b):
b_int8pack, b_scales, _ = _dynamically_quantize_per_channel(
b, -128, 127, torch.int8
)
return b_int8pack, b_scales
|
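A simplified, symmetric stand-in for the per-channel quantization the helper above delegates to _dynamically_quantize_per_channel (the real helper also returns zero points, which the snippet discards; this sketch is not its implementation): one scale per output row of the (n, k) weight, with reconstruction error within half a quantization step.

import torch  # illustrative sketch

def quantize_per_channel_symmetric(w):
    scale = w.abs().amax(dim=1, keepdim=True).clamp(min=1e-6) / 127.0
    q = (w / scale).round().clamp(-128, 127).to(torch.int8)
    return q, scale.squeeze(1)

w = torch.randn(48, 64)
q, scale = quantize_per_channel_symmetric(w)
w_hat = q.to(w.dtype) * scale.unsqueeze(1)
assert (w - w_hat).abs().max() <= scale.max()        # at most half a step per element (checked loosely)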
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
weight_int8pack_mm
|
def weight_int8pack_mm(a, b_int8pack, b_scales):
return torch._weight_int8pack_mm(
a, b_int8pack, b_scales
)
b_int8pack, b_scales = convert_weight_to_int8pack(b)
res = weight_int8pack_mm(a, b_int8pack, b_scales)
ref = torch.mm(a, b.transpose(0, 1))
mean_err = ((res - ref).abs() / ref).mean()
self.assertTrue(mean_err < 0.05)
|
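A self-contained sketch of why the 5% mean-relative-error bound above is loose enough (the shapes, the positive test data, and the symmetric quantizer are assumptions; the weight-only matmul is emulated with an explicit dequantize-then-mm and is not claimed to match torch._weight_int8pack_mm bit for bit):

import torch  # illustrative sketch

a = torch.rand(32, 64)                                     # positive activations
b = torch.rand(48, 64)                                     # weight stored as (n, k)
scale = b.abs().amax(dim=1, keepdim=True) / 127.0
b_int8 = (b / scale).round().clamp(-128, 127).to(torch.int8)
res = torch.mm(a, (b_int8.to(torch.float32) * scale).t())  # dequantized weight-only matmul
ref = torch.mm(a, b.transpose(0, 1))
mean_err = ((res - ref).abs() / ref).mean()
assert mean_err < 0.05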
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
int8_mm
|
def int8_mm(a, b_int8pack, b_scales):
return torch._weight_int8pack_mm(
a, b_int8pack, b_scales
)
res = int8_mm(a, b_int8pack, b_scales)
ref = torch.mm(a, b.transpose(0, 1))
mean_err = ((res - ref).abs() / ref).mean()
self.assertTrue(mean_err < 0.05)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
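For context on the int8_mm rows above: they quantize a weight matrix to int8 with per-channel scales, run the matmul through the quantized path, and then bound the error against a plain float matmul. Below is a minimal, self-contained sketch of the same accuracy check, using a hand-rolled symmetric per-channel quantizer instead of the private torch._weight_int8pack_mm packing, and a norm-based relative error rather than the element-wise mean (which can blow up when a reference entry is near zero). The tensor names and the 0.05 threshold come from the snippet above; everything else is an assumption.

import torch

def quantize_per_channel_int8(w):
    # Symmetric per-output-channel quantization: one scale per row of w.
    # A stand-in for the packed format expected by torch._weight_int8pack_mm.
    scales = w.abs().amax(dim=1, keepdim=True) / 127.0
    w_int8 = torch.clamp(torch.round(w / scales), -127, 127).to(torch.int8)
    return w_int8, scales.squeeze(1)

def int8_weight_only_mm(a, w_int8, scales):
    # Dequantize the weight and fall back to an ordinary matmul.
    w_dq = w_int8.to(a.dtype) * scales.unsqueeze(1)
    return a @ w_dq.t()

a = torch.randn(32, 64)
b = torch.randn(16, 64)          # weight stored as (out_features, in_features)
b_int8, b_scales = quantize_per_channel_int8(b)

res = int8_weight_only_mm(a, b_int8, b_scales)
ref = torch.mm(a, b.transpose(0, 1))
rel_err = (res - ref).norm() / ref.norm()
assert rel_err < 0.05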
torch
|
test/test_linalg.py
|
_test_mm
|
def _test_mm(n, m, p, dtype, genf):
# helper function
def matrixmultiply(mat1, mat2):
n = mat1.size(0)
m = mat1.size(1)
p = mat2.size(1)
res = torch.zeros(n, p, dtype=dtype, device=device)
for i, j in iter_indices(res):
res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
return res
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 1
mat1 = genf(n, m)
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 2
mat1 = genf(m, n).t()
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# test with zero stride
mat1 = genf(n, m)
mat2 = genf(m, 1).expand(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
|
def _test_mm(n, m, p, dtype, genf):
# helper function
def matrixmultiply(mat1, mat2):
n = mat1.size(0)
m = mat1.size(1)
p = mat2.size(1)
dtype_ = torch.float if dtype == torch.half else dtype
if dtype == torch.half:
mat1 = mat1.float()
mat2 = mat2.float()
res = torch.zeros(n, p, dtype=dtype_, device=device)
for i, j in iter_indices(res):
res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
return res.half() if dtype == torch.half else res
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 1
mat1 = genf(n, m)
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 2
mat1 = genf(m, n).t()
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# test with zero stride
mat1 = genf(n, m)
mat2 = genf(m, 1).expand(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
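The modified matrixmultiply above upcasts torch.half inputs to float32 so the naive triple-loop reference does not itself accumulate fp16 rounding error, then casts the result back before comparing. A minimal sketch of that reference pattern follows; the shapes, names, and tolerances are illustrative assumptions.

import torch

def reference_mm(mat1, mat2):
    # Accumulate in float32 even for half inputs, then cast back, so the
    # reference itself is not polluted by fp16 accumulation error.
    out_dtype = mat1.dtype
    acc_dtype = torch.float32 if out_dtype == torch.half else out_dtype
    a, b = mat1.to(acc_dtype), mat2.to(acc_dtype)
    n, m, p = a.size(0), a.size(1), b.size(1)
    res = torch.zeros(n, p, dtype=acc_dtype)
    for i in range(n):
        for j in range(p):
            res[i, j] = sum(a[i, k] * b[k, j] for k in range(m))
    return res.to(out_dtype)

mat1 = torch.randn(4, 5, dtype=torch.half)
mat2 = torch.randn(5, 3, dtype=torch.half)
expected = torch.mm(mat1.float(), mat2.float()).half()   # float32 matmul, rounded once
torch.testing.assert_close(reference_mm(mat1, mat2), expected, atol=1e-3, rtol=1e-3)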
torch
|
test/test_linalg.py
|
matrixmultiply
|
def matrixmultiply(mat1, mat2):
n = mat1.size(0)
m = mat1.size(1)
p = mat2.size(1)
res = torch.zeros(n, p, dtype=dtype, device=device)
for i, j in iter_indices(res):
res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
return res
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 1
mat1 = genf(n, m)
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 2
mat1 = genf(m, n).t()
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# test with zero stride
mat1 = genf(n, m)
mat2 = genf(m, 1).expand(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
|
def matrixmultiply(mat1, mat2):
n = mat1.size(0)
m = mat1.size(1)
p = mat2.size(1)
dtype_ = torch.float if dtype == torch.half else dtype
if dtype == torch.half:
mat1 = mat1.float()
mat2 = mat2.float()
res = torch.zeros(n, p, dtype=dtype_, device=device)
for i, j in iter_indices(res):
res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
return res.half() if dtype == torch.half else res
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 1
mat1 = genf(n, m)
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 2
mat1 = genf(m, n).t()
mat2 = genf(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# test with zero stride
mat1 = genf(n, m)
mat2 = genf(m, 1).expand(m, p)
res = torch.mm(mat1, mat2)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# contiguous case
mat1 = genf(n, m)
mat2 = genf(m, p)
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
# explicitly exercise the _out variant in torch.mm().
# non contiguous case 3
mat1 = genf(m, n).t()
mat2 = genf(p, m).t()
res = genf(n, p)
torch.mm(mat1, mat2, out=res)
res2 = matrixmultiply(mat1, mat2)
self.assertEqual(res, res2)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
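Besides the dtype handling, the surrounding _test_mm sweep runs torch.mm over transposed views, a zero-stride expanded operand, and the explicit out= variant, so non-contiguous memory layouts are exercised as well. A compact illustration of building such inputs and checking them against contiguous copies (shapes are arbitrary):

import torch

def check(mat1, mat2):
    # Compare torch.mm on possibly non-contiguous views against the same
    # product computed from contiguous copies of the operands.
    expected = torch.mm(mat1.contiguous(), mat2.contiguous())
    torch.testing.assert_close(torch.mm(mat1, mat2), expected)

n, m, p = 4, 6, 3
a = torch.randn(n, m)
check(a, torch.randn(m, p))                        # contiguous
check(a, torch.randn(p, m).t())                    # transposed rhs
check(torch.randn(m, n).t(), torch.randn(m, p))    # transposed lhs
check(a, torch.randn(m, 1).expand(m, p))           # zero-stride rhs

# explicit out= variant with a preallocated destination
out = torch.empty(n, p)
torch.mm(a, torch.randn(m, p), out=out)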
torch
|
test/test_linalg.py
|
genf_int
|
def genf_int(x, y):
return torch.randint(0, 100, (x, y), dtype=dtype, device=device)
|
def genf_int(x, y):
return torch.empty((x, y), dtype=torch.int8, device=device)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
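The new genf_int above returns an uninitialized int8 tensor via torch.empty, which is enough when only shapes and dtypes are being plumbed through; when the values themselves matter, a bounded generator keeps them inside int8's representable range. A quick sketch of both options (the function names are illustrative):

import torch

def genf_int8_uninitialized(x, y, device="cpu"):
    # Fast: values are whatever happens to be in memory; fine when only
    # shapes/dtypes are exercised, not numerics.
    return torch.empty((x, y), dtype=torch.int8, device=device)

def genf_int8_random(x, y, device="cpu"):
    # Deterministic numerics: stay inside int8's range [-128, 127]
    # (the high bound of randint is exclusive).
    return torch.randint(-128, 128, (x, y), dtype=torch.int8, device=device)

t = genf_int8_random(3, 4)
assert t.dtype == torch.int8 and t.min() >= -128 and t.max() <= 127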
torch
|
test/test_maskedtensor.py
|
_compare_forward_backward
|
def _compare_forward_backward(data, mask, fn):
mt = masked_tensor(data, mask, requires_grad=True)
masked_res = fn(mt)
masked_res.sum().backward()
t = data.masked_fill(~mask, float("-inf")).detach().clone().requires_grad_()
tensor_res = fn(t)
tensor_res.sum().backward()
_compare_mt_t(masked_res, tensor_res)
_compare_mt_t(mt.grad, t.grad, atol=1e-06)
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
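_compare_forward_backward above captures the recurring pattern in these tests: run a function on a MaskedTensor, run it on a dense tensor whose masked-out slots are filled with -inf, and compare both the outputs and the gradients. A minimal usage sketch with softmax, reusing the same torch.masked prototype API the test imports; the explicit tolerances and the .get_data()-based comparison are assumptions.

import torch
from torch.masked import masked_tensor

data = torch.randn(3, 4) * 0.1
mask = torch.rand(3, 4) > 0.5

# MaskedTensor path
mt = masked_tensor(data, mask, requires_grad=True)
mt_res = torch.softmax(mt, -1)
mt_res.sum().backward()

# Dense reference: masked-out entries become -inf so softmax ignores them
t = data.masked_fill(~mask, float("-inf")).detach().clone().requires_grad_()
t_res = torch.softmax(t, -1)
t_res.sum().backward()

# Compare only the unmasked slots of the results and the gradients
torch.testing.assert_close(mt_res.get_data()[mask], t_res[mask])
torch.testing.assert_close(mt.grad.get_data()[mask], t.grad[mask], atol=1e-6, rtol=1e-5)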
torch
|
test/test_maskedtensor.py
|
_create_random_mask
|
def _create_random_mask(shape, device):
return torch.randint(0, 2, shape, device=device).bool()
|
def _create_random_mask(shape, device):
return make_tensor(shape, device=device, dtype=torch.bool)
|
import torch
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
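The change above replaces a hand-rolled randint(...).bool() with make_tensor, the shared testing utility, which already knows how to produce random values of any dtype, including torch.bool, on the requested device. A tiny sketch of equivalent usage; the keyword form shown matches the call in the test itself.

import torch
from torch.testing import make_tensor

shape, device = (3, 4), "cpu"

# Random boolean mask via the shared testing helper
mask = make_tensor(shape, device=device, dtype=torch.bool)

# Roughly equivalent hand-rolled version
mask_manual = torch.randint(0, 2, shape, device=device).bool()

assert mask.shape == mask_manual.shape and mask.dtype == torch.bool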
torch
|
test/test_maskedtensor.py
|
_fix_fn_name
|
def _fix_fn_name(fn_name):
if fn_name[-1] == "_":
fn_name = fn_name[:-1]
return fn_name
class TestBasics(TestCase):
def test_invalid_tensor_inputs(self, device):
data = torch.randn((3, 4), device=device)
mask = _create_random_mask((3, 4), device=device)
mt = masked_tensor(data, mask)
with self.assertRaisesRegex(TypeError, "data must be a Tensor"):
masked_tensor(mt, mask)
with self.assertRaisesRegex(TypeError, "data must be a Tensor"):
masked_tensor(0, mask)
with self.assertRaisesRegex(TypeError, "mask must be a Tensor"):
masked_tensor(data, mt)
with self.assertRaisesRegex(TypeError, "mask must be a Tensor"):
masked_tensor(data, 0)
def test_diff_layouts(self, device):
data = torch.randn((3, 4), device=device).to_sparse_coo()
mask = _create_random_mask((3, 4), device=device)
with self.assertRaisesRegex(TypeError, "data and mask must have the same layout"):
masked_tensor(data, mask)
def test_diff_dim(self, device):
data = torch.randn((3, 4, 5), device=device)
mask = _create_random_mask((3, 4), device=device)
with self.assertRaisesRegex(ValueError, "data.dim\\(\\) must equal mask.dim\\(\\)"):
masked_tensor(data, mask)
def test_diff_sizes(self, device):
data = torch.randn((3, 4), device=device)
mask = _create_random_mask((3, 3), device=device)
with self.assertRaisesRegex(ValueError, "data.size\\(\\) must equal mask.size\\(\\)"):
masked_tensor(data, mask)
def test_grad_warning(self, device):
data = torch.randn((3, 4), device=device, requires_grad=True)
mask = _create_random_mask((3, 4), device=device)
msg = "It is not recommended to create a MaskedTensor with a tensor that requires_grad."
with self.assertWarnsRegex(UserWarning, msg):
mt = masked_tensor(data, mask)
def test_add(self, device):
data = torch.arange(5.0, device=device)
mask = torch.tensor([True, True, False, True, False], device=device)
m0 = masked_tensor(data, mask)
m1 = masked_tensor(data, ~mask)
with self.assertRaisesRegex(ValueError, "Input masks must match."):
m0 + m1
_compare_mts(m0 + m0, masked_tensor(torch.tensor([0., 2, 0, 6, 0], device=device), mask))
def test_softmax(self, device):
data = torch.randn((3, 4), device=device) * 0.1
mask = torch.tensor(
[
[True, True, True, False],
[False, True, False, True],
[True, True, False, False],
],
device=device
)
mt = masked_tensor(data, mask, requires_grad=True)
masked_res = torch.softmax(mt, -1)
masked_res.sum().backward()
xinf = data.masked_fill(~mask, float("-inf")).detach().clone().requires_grad_()
tensor_res = torch.softmax(xinf, -1)
tensor_res.sum().backward()
_compare_mt_t(masked_res, tensor_res)
_compare_mt_t(mt.grad, xinf.grad, atol=1e-06)
def test_where(self, device):
data = torch.tensor([-10.0, -5, 0, 5, 10, 50, 60, 70, 80, 90, 100], device=device)
mask = data < 0
mx = masked_tensor(data, mask, requires_grad=True)
my = masked_tensor(torch.ones_like(data), ~mask, requires_grad=True)
masked_res = torch.where(mask, torch.exp(mx), my)
masked_res.sum().backward()
x = data.detach().clone().requires_grad_()
y = torch.ones_like(x, device=device, requires_grad=True)
tensor_res = torch.where(mask, torch.exp(x), y)
tensor_res.sum().backward()
_compare_mt_t(masked_res, tensor_res)
_compare_mt_t(mx.grad, x.grad)
_compare_mt_t(my.grad, y.grad)
def test_to_sparse(self, device):
for sample in _generate_sample_data(device=device):
data = sample.input
mask = sample.kwargs["mask"]
mt = masked_tensor(data.clone().detach(), mask, requires_grad=True)
sparse_mt = mt.to_sparse()
data.to_sparse().to_dense().sum().backward()
sparse_mt.to_dense().sum().backward()
_compare_mt_t(sparse_mt, data)
_compare_mt_t(mt.grad, data.grad)
def test_to_dense(self, device):
samples = _generate_sample_data(
device=device,
layout=torch.sparse_coo
) + _generate_sample_data(device=device, layout=torch.sparse_csr)
for sample in samples:
data = sample.input
mask = sample.kwargs["mask"]
mt = masked_tensor(data, mask, requires_grad=True)
dense_data = data.to_dense().detach().clone().requires_grad_(True)
dense_mt = mt.to_dense()
dense_data.sum().backward()
dense_mt.sum().backward()
_compare_mt_t(dense_mt, dense_data)
_compare_mt_t(mt.grad.to_dense(), dense_data.grad)
def test_to_dense_and_sparse_coo(self, device):
for sample in _generate_sample_data(device=device, layout=torch.strided):
data = sample.input
mask = sample.kwargs["mask"]
ms = mask.to_sparse_coo().coalesce()
mt = masked_tensor(data, mask, requires_grad=True)
mts = masked_tensor(data.sparse_mask(ms), ms, requires_grad=True)
converted = mt.to_sparse().to_dense()
converted.sum().backward()
converted2 = mts.to_dense()
converted2.sum().backward()
_compare_mts(converted, converted2)
_compare_mts(mt.grad, mts.grad.to_dense())
def test_to_dense_and_sparse_csr(self, device):
for sample in _generate_sample_data(device=device, layout=torch.strided):
data = sample.input
mask = sample.kwargs["mask"]
if data.ndim != 2:
continue
ms = mask.to_sparse_csr()
mt = masked_tensor(data, mask, requires_grad=True)
mts = masked_tensor(data.sparse_mask(ms), ms, requires_grad=True)
converted = mt.to_sparse_csr().to_dense()
converted.sum().backward()
converted2 = mts.to_dense()
converted2.sum().backward()
_compare_mts(converted, converted2)
_compare_mts(mt.grad, mts.grad.to_dense())
def test_invalid_sparse_layout(self, device):
data = torch.randn((3, 4), device=device).to_sparse_csc()
mask = _create_random_mask((3, 4), device=device).to_sparse_csc()
with self.assertRaisesRegex(TypeError, "data layout of torch.sparse_csc is not supported"):
masked_tensor(data, mask)
def test_invalid_sparse_coo_values(self, device):
v = torch.tensor([3, 4, 5], dtype=torch.float32)
i1 = torch.tensor([[0, 1, 1], [2, 0, 2]])
i2 = torch.tensor([[0, 1, 1], [2, 1, 2]])
t = torch.sparse_coo_tensor(i1, v, (2, 4), device=device)
mask = torch.sparse_coo_tensor(i2, torch.tensor([True, True, True]), (2, 4), device=device)
msg = "data and mask are both sparse COO tensors but do not have the same indices."
with self.assertRaisesRegex(ValueError, msg):
masked_tensor(t, mask)
def test_invalid_sparse_csr_values(self, device):
crow_indices1 = [0, 2, 3]
crow_indices2 = [0, 1, 3]
col_indices1 = [0, 1, 2]
col_indices2 = [1, 2, 3]
values = [2, 3, 4]
mask_values = [True, True, True]
t1 = torch.sparse_csr_tensor(
torch.tensor(crow_indices1, dtype=torch.int64),
torch.tensor(col_indices1, dtype=torch.int64),
torch.tensor(values),
size=(2, 4)
)
mask1 = torch.sparse_csr_tensor(
torch.tensor(crow_indices2, dtype=torch.int64),
torch.tensor(col_indices1, dtype=torch.int64),
torch.tensor(mask_values),
dtype=torch.bool,
size=(2, 4),
)
t2 = torch.sparse_csr_tensor(
torch.tensor(crow_indices2, dtype=torch.int64),
torch.tensor(col_indices1, dtype=torch.int64),
torch.tensor(values),
size=(2, 4),
)
mask2 = torch.sparse_csr_tensor(
torch.tensor(crow_indices2, dtype=torch.int64),
torch.tensor(col_indices2, dtype=torch.int64),
torch.tensor(mask_values),
dtype=torch.bool,
size=(2, 4),
)
msg = "data and mask are both sparse CSR tensors but do not share either crow or col indices."
with self.assertRaisesRegex(ValueError, msg):
masked_tensor(t1, mask1)
with self.assertRaisesRegex(ValueError, msg):
masked_tensor(t2, mask2)
def test_contiguous(self, device):
data = torch.randn((3, 3), device=device)
contiguous_data = data.clone()
mask1 = (contiguous_data > 0).bool()
not_contiguous_data = torch.as_strided(data.clone(), (2, 2), (1, 2))
mask2 = (not_contiguous_data > 0).bool()
contiguous_mt = masked_tensor(contiguous_data, mask1)
not_contiguous_mt = masked_tensor(not_contiguous_data, mask2)
contiguous_mt_sparse = masked_tensor(
contiguous_data.to_sparse_coo(), mask1.to_sparse_coo()
)
not_contiguous_mt_sparse = masked_tensor(
not_contiguous_data.to_sparse_coo(), mask2.to_sparse_coo()
)
self.assertEqual(contiguous_data.is_contiguous(), True)
self.assertEqual(not_contiguous_data.is_contiguous(), False)
self.assertEqual(contiguous_mt.is_contiguous(), True)
self.assertEqual(not_contiguous_mt.is_contiguous(), False)
error_msg = "MaskedTensors with sparse data do not have is_contiguous"
for t in [contiguous_mt_sparse, not_contiguous_mt_sparse]:
with self.assertRaisesRegex(ValueError, error_msg):
t.is_contiguous()
with self.assertRaisesRegex(ValueError, error_msg):
t.contiguous()
now_contiguous_mt = not_contiguous_mt.contiguous()
_compare_mts(not_contiguous_mt, now_contiguous_mt)
self.assertEqual(now_contiguous_mt.is_contiguous(), True)
self.assertEqual(now_contiguous_mt.get_data().is_contiguous(), True)
self.assertEqual(now_contiguous_mt.is_contiguous(), True)
class TestUnary(TestCase):
def _get_test_data(self, fn_name):
data = torch.randn(10, 10)
mask = torch.rand(10, 10) > 0.5
fn_name = _fix_fn_name(fn_name)
if fn_name in ["log", "log10", "log1p", "log2", "sqrt"]:
data = data.mul(0.5).abs()
if fn_name in ["rsqrt"]:
data = data.abs() + 1 # Avoid division by zero
if fn_name in ["acos", "arccos", "asin", "arcsin", "logit"]:
data = data.abs().mul(0.5).clamp(0, 1)
if fn_name in ["atanh", "arctanh", "erfinv"]:
data = data.mul(0.5).clamp(-1, 1)
if fn_name in ["acosh", "arccosh"]:
data = data.abs() + 1
if fn_name in ["bitwise_not"]:
data = data.mul(128).to(torch.int8)
return data, mask
def _get_sample_kwargs(self, fn_name):
fn_name = _fix_fn_name(fn_name)
kwargs = {}
if fn_name in ["clamp", "clip"]:
kwargs["min"] = -0.5
kwargs["max"] = 0.5
return kwargs
def _get_sample_args(self, fn_name, data, mask):
fn_name = _fix_fn_name(fn_name)
mt = masked_tensor(data, mask)
t_args = [data]
mt_args = [mt]
if fn_name in ["pow"]:
t_args += [2.0]
mt_args += [2.0]
return t_args, mt_args
@parametrize("fn", NATIVE_UNARY_FNS)
def test_unary(self, fn):
torch.random.manual_seed(0)
fn_name = fn.__name__
data, mask = self._get_test_data(fn_name)
kwargs = self._get_sample_kwargs(fn_name)
t_args, mt_args = self._get_sample_args(fn_name, data, mask)
mt_result = fn(*mt_args, **kwargs)
t_result = fn(*t_args, **kwargs)
_compare_mt_t(mt_result, t_result)
@parametrize("fn", NATIVE_INPLACE_UNARY_FNS)
def test_inplace_unary(self, fn):
torch.random.manual_seed(0)
fn_name = fn.__name__
data, mask = self._get_test_data(fn_name)
kwargs = self._get_sample_kwargs(fn_name)
t_args, mt_args = self._get_sample_args(fn_name, data, mask)
mt_result = fn(*mt_args, **kwargs)
t_result = fn(*t_args, **kwargs)
_compare_mt_t(mt_result, t_result)
class TestBinary(TestCase):
def _get_test_data(self, fn_name):
fn_name = _fix_fn_name(fn_name)
data0 = torch.randn(10, 10)
data1 = torch.randn(10, 10)
mask = torch.rand(10, 10) > 0.5
if fn_name in ["bitwise_and", "bitwise_or", "bitwise_xor"]:
data0 = data0.mul(128).to(torch.int8)
data1 = data1.mul(128).to(torch.int8)
if fn_name in ["bitwise_left_shift", "bitwise_right_shift"]:
data0 = data0.abs().to(torch.int64)
data1 = data1.abs().to(torch.int64)
return data0, data1, mask
def _get_sample_kwargs(self, fn_name):
fn_name = _fix_fn_name(fn_name)
kwargs = {}
return kwargs
def _yield_sample_args(self, fn_name, data0, data1, mask):
""" Returns two sets of Tensor and MaskedTensor args for a binary function to compute.
Tensor args are all the same (just the two provided data tensors),
while the MaskedTensor args test both (MaskedTensor, MaskedTensor) and (MaskedTensor, Tensor)
"""
fn_name = _fix_fn_name(fn_name)
mt0 = masked_tensor(data0, mask)
mt1 = masked_tensor(data1, mask)
t_args = [data0, data1]
mt_args = [mt0, mt1]
yield t_args, mt_args
t_args = [data0, data1]
mt_args = [mt0, data1]
yield t_args, mt_args
@parametrize("fn", NATIVE_BINARY_FNS)
def test_binary(self, fn):
torch.random.manual_seed(0)
fn_name = fn.__name__
data0, data1, mask = self._get_test_data(fn_name)
kwargs = self._get_sample_kwargs(fn_name)
for (t_args, mt_args) in self._yield_sample_args(fn_name, data0, data1, mask):
mt_result = fn(*mt_args, **kwargs)
t_result = fn(*t_args, **kwargs)
_compare_mt_t(mt_result, t_result)
@parametrize("fn", NATIVE_INPLACE_BINARY_FNS)
def test_inplace_binary(self, fn):
torch.random.manual_seed(0)
fn_name = fn.__name__
data0, data1, mask = self._get_test_data(fn_name)
kwargs = self._get_sample_kwargs(fn_name)
for (t_args, mt_args) in self._yield_sample_args(fn_name, data0, data1, mask):
mt_result = fn(*mt_args, **kwargs)
t_result = fn(*t_args, **kwargs)
_compare_mt_t(mt_result, t_result)
@parametrize("fn_name", ["add", "add_"])
def test_masks_match(self, fn_name):
torch.random.manual_seed(0)
fn = getattr(torch.ops.aten, fn_name)
data0, data1, mask = self._get_test_data(fn_name)
mask0 = mask
mask1 = torch.rand(mask.size()) > 0.5
mt0 = masked_tensor(data0, mask0)
mt1 = masked_tensor(data1, mask1)
try:
fn(mt0, mt1)
raise AssertionError()
except ValueError as e:
assert (
"Input masks must match. If you need support for this, please open an issue on Github."
== str(e)
)
class TestReductions(TestCase):
def test_max_not_implemented(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m)
with self.assertRaisesRegex(TypeError, "no implementation found for 'torch._ops.aten.max.default'"):
mt.max()
def test_sum(self):
d = torch.tensor([[0, 1, 2, 6], [3, 4, 5.0, 7]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(17.0), torch.tensor(True)), mt.sum())
_compare_mts(
masked_tensor(
torch.tensor([0.0, 4.0, 1.0, 13]),
torch.tensor([True, True, False, True]),
),
mt.sum(dim=0),
)
def test_sum_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.sum().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor(1.0).expand_as(m), m))
def test_mean(self):
d = torch.tensor([[0, 1, 3, 2], [3, 4, 1.0, 4]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(2.5), torch.tensor(True)), mt.mean())
_compare_mts(
masked_tensor(
torch.tensor([0.0, 4.0, 1.0, 3]),
torch.tensor([True, True, False, True]),
),
mt.mean(dim=0),
)
"""
    The following block of tests, "test_mean_grad_case_1[a through f]", is used to test the functionality of
    the two different ways of constructing MaskedTensors:
        masked_tensor(data, mask, requires_grad=True/False) -- NOT a differentiable constructor; the result is always a leaf
        as_masked_tensor(data, mask) -- differentiable constructor
    Like torch.tensor(data), masked_tensor(data, mask) will issue a UserWarning if data.requires_grad=True
as_masked_tensor does not take in requires_grad -- it just takes on the requires_grad from data
Therefore, there are 6 cases to test and we use `mean` as a proxy to test the different combinations
Assuming mt.mean().backward() is run after each constructor:
Case 1a:
values.requires_grad = True
mt = masked_tensor(values, mask, requires_grad=True)
yields
        - a UserWarning is raised because values.requires_grad=True
- values.grad = None
- mt.grad is a MaskedTensor with the correct gradient
Case 1b:
values.requires_grad = False
mt = masked_tensor(values, mask, requires_grad=True)
yields
- values.grad = None
- mt.grad is a MaskedTensor with the correct gradient
Case 2a/2b:
values.requires_grad = True/False
mt = masked_tensor(values, mask, requires_grad=False)
will both yield a RuntimeError of "element 0 of tensors does not require grad and does not have a grad_fn"
as expected. When values.requires_grad=True, we will also get a UserWarning
Case 3a:
values.requires_grad = True
mt = as_masked_tensor(values, mask)
yields
- values.grad is a MaskedTensor with the correct gradient
- mt.grad is None and gives a UserWarning that
"The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad"
Case 3b:
values.requires_grad = False
mt = as_masked_tensor(values, mask)
will yield a RuntimeError of "element 0 of tensors does not require grad and does not have a grad_fn"
as expected.
"""
def test_mean_grad_case_1a(self):
""" values.requires_grad = True
mt = masked_tensor(values, mask, requires_grad=True)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]], requires_grad=True)
m = torch.tensor([[True, False, False], [False, True, False]])
with self.assertWarnsRegex(UserWarning, "It is not recommended to create a MaskedTensor"):
mt = masked_tensor(d, m, requires_grad=True)
mt.mean().backward()
self.assertIsNone(d.grad)
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.5, 0, 0], [0, 0.5, 0]]), m))
def test_mean_grad_case_1b(self):
""" values.requires_grad = False
mt = masked_tensor(values, mask, requires_grad=True)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.mean().backward()
self.assertIsNone(d.grad)
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.5, 0, 0], [0, 0.5, 0]]), m))
def test_mean_grad_case_1c(self):
""" values.requires_grad = True
mt = masked_tensor(values, mask, requires_grad=False)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]], requires_grad=True)
m = torch.tensor([[True, False, False], [False, True, False]])
with self.assertWarnsRegex(UserWarning, "It is not recommended to create a MaskedTensor"):
mt = masked_tensor(d, m, requires_grad=False)
result = mt.mean()
msg = "element 0 of tensors does not require grad and does not have a grad_fn"
with self.assertRaisesRegex(RuntimeError, msg):
result.backward()
def test_mean_grad_case_1d(self):
""" values.requires_grad = False
mt = masked_tensor(values, mask, requires_grad=False)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=False)
result = mt.mean()
msg = "element 0 of tensors does not require grad and does not have a grad_fn"
with self.assertRaisesRegex(RuntimeError, msg):
result.backward()
def test_mean_grad_case_1e(self):
""" values.requires_grad = True
mt = as_masked_tensor(values, mask)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]], requires_grad=True)
m = torch.tensor([[True, False, False], [False, True, False]])
mt = as_masked_tensor(d, m)
mt.mean().backward()
_compare_mts(d.grad, masked_tensor(torch.tensor([[0.5, 0, 0], [0, 0.5, 0]]), m))
msg = "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad"
with self.assertWarnsRegex(UserWarning, msg):
self.assertIsNone(mt.grad)
def test_mean_grad_case_1f(self):
""" values.requires_grad = False
mt = as_masked_tensor(values, mask)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = as_masked_tensor(d, m)
result = mt.mean()
msg = "element 0 of tensors does not require grad and does not have a grad_fn"
with self.assertRaisesRegex(RuntimeError, msg):
result.backward()
def test_mean_dim_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, True, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.mean(1).sum().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.5, 0.5, 0], [0, 1, 0]]), m))
def test_amax(self):
d = torch.tensor([[0, 1, 3, -3], [3, -4, 1.0, 3]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(3.0), torch.tensor(True)), mt.amax())
_compare_mts(
masked_tensor(
torch.tensor([0.0, -4.0, 1.0, 3]),
torch.tensor([True, True, False, True]),
),
mt.amax(dim=0),
)
def test_amax_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.amax().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.0, 0, 0], [0, 1, 0]]), m))
def test_amin(self):
d = torch.tensor([[0, 1, 3, -3], [3, -4, 1.0, 3]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(-4.0), torch.tensor(True)), mt.amin())
_compare_mts(
masked_tensor(
torch.tensor([0.0, -4.0, 1.0, -3]),
torch.tensor([True, True, False, True]),
),
mt.amin(dim=0),
)
def test_amin_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.amin().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[1.0, 0, 0], [0, 0, 0]]), m))
def test_prod(self):
d = torch.tensor([[0, 1, 3, 0.0], [float("nan"), 4, 1.0, 5.0]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(0.0), torch.tensor(True)), mt.prod())
_compare_mts(
masked_tensor(
torch.tensor([0.0, 4.0, 1.0, 0.0]),
torch.tensor([True, True, False, True]),
),
mt.prod(dim=0),
)
def test_prod_grad(self):
d = torch.tensor([[2, float("nan"), 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.prod().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[4.0, 0, 0], [0, 2, 0]]), m))
def test_all(self):
d = torch.tensor([[True, True, False, False], [False, True, True, True]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(False), torch.tensor(True)), mt.all())
_compare_mts(
masked_tensor(
torch.tensor([True, True, True, False]),
torch.tensor([True, True, False, True]),
),
mt.all(dim=0),
)
m = torch.tensor([[True, False, True, False], [False, True, False, False]])
mt = masked_tensor(d, m)
_compare_mts(
masked_tensor(
torch.tensor([True, True, False, True]),
torch.tensor([True, True, True, False]),
),
mt.all(dim=0),
)
def test_grad_dtype(self):
d = torch.tensor([[True, True, False], [False, True, True]])
m = torch.tensor([[True, False, False], [False, True, False]])
msg = "Only Tensors of floating point and complex dtype can require gradients"
with self.assertRaisesRegex(RuntimeError, msg):
masked_tensor(d, m, requires_grad=True)
|
def _fix_fn_name(fn_name):
if fn_name[-1] == "_":
fn_name = fn_name[:-1]
return fn_name
class TestBasics(TestCase):
def test_invalid_tensor_inputs(self, device):
data = torch.randn((3, 4), device=device)
mask = _create_random_mask((3, 4), device=device)
mt = masked_tensor(data, mask)
with self.assertRaisesRegex(TypeError, "data must be a Tensor"):
masked_tensor(mt, mask)
with self.assertRaisesRegex(TypeError, "data must be a Tensor"):
masked_tensor(0, mask)
with self.assertRaisesRegex(TypeError, "mask must be a Tensor"):
masked_tensor(data, mt)
with self.assertRaisesRegex(TypeError, "mask must be a Tensor"):
masked_tensor(data, 0)
def test_diff_layouts(self, device):
data = torch.randn((3, 4), device=device).to_sparse_coo()
mask = _create_random_mask((3, 4), device=device)
with self.assertRaisesRegex(TypeError, "data and mask must have the same layout"):
masked_tensor(data, mask)
def test_diff_dim(self, device):
data = torch.randn((3, 4, 5), device=device)
mask = _create_random_mask((3, 4), device=device)
with self.assertRaisesRegex(ValueError, "data.dim\\(\\) must equal mask.dim\\(\\)"):
masked_tensor(data, mask)
def test_diff_sizes(self, device):
data = torch.randn((3, 4), device=device)
mask = _create_random_mask((3, 3), device=device)
with self.assertRaisesRegex(ValueError, "data.size\\(\\) must equal mask.size\\(\\)"):
masked_tensor(data, mask)
def test_grad_warning(self, device):
data = torch.randn((3, 4), device=device, requires_grad=True)
mask = _create_random_mask((3, 4), device=device)
msg = "It is not recommended to create a MaskedTensor with a tensor that requires_grad."
with self.assertWarnsRegex(UserWarning, msg):
mt = masked_tensor(data, mask)
def test_add(self, device):
data = torch.arange(5.0, device=device)
mask = torch.tensor([True, True, False, True, False], device=device)
m0 = masked_tensor(data, mask)
m1 = masked_tensor(data, ~mask)
with self.assertRaisesRegex(ValueError, "Input masks must match."):
m0 + m1
_compare_mts(m0 + m0, masked_tensor(torch.tensor([0., 2, 0, 6, 0], device=device), mask))
def test_softmax(self, device):
data = torch.randn((3, 4), device=device) * 0.1
mask = torch.tensor(
[
[True, True, True, False],
[False, True, False, True],
[True, True, False, False],
],
device=device
)
_compare_forward_backward(data, mask, lambda t: torch.softmax(t, -1))
def test_where(self, device):
data = torch.tensor([-10.0, -5, 0, 5, 10, 50, 60, 70, 80, 90, 100], device=device)
mask = data < 0
mx = masked_tensor(data, mask, requires_grad=True)
my = masked_tensor(torch.ones_like(data), ~mask, requires_grad=True)
masked_res = torch.where(mask, torch.exp(mx), my)
masked_res.sum().backward()
x = data.detach().clone().requires_grad_()
y = torch.ones_like(x, device=device, requires_grad=True)
tensor_res = torch.where(mask, torch.exp(x), y)
tensor_res.sum().backward()
_compare_mt_t(masked_res, tensor_res)
_compare_mt_t(mx.grad, x.grad)
_compare_mt_t(my.grad, y.grad)
def test_unfold(self, device):
data = torch.rand(5, 5, device=device)
mask = torch.rand(5, 5, device=device) > 0.5
_compare_forward_backward(data, mask, lambda t: t.unfold(1, 2, 2))
def test_nn_unfold(self, device):
data = torch.rand(2, 5, 3, 4, device=device)
mask = torch.rand(2, 5, 3, 4, device=device) > 0.5
_compare_forward_backward(data, mask, lambda t: torch.nn.functional.unfold(t, kernel_size=(2, 3)))
def test_stack(self, device):
masked_tensors = [
masked_tensor(
torch.rand(2, 5, 3, 4, device=device),
torch.rand(2, 5, 3, 4, device=device) > 0.5,
requires_grad=True,
) for _ in range(3)
]
data_tensors = [mt.get_data().detach().clone().requires_grad_() for mt in masked_tensors]
masked_res = torch.stack(masked_tensors)
tensor_res = torch.stack(data_tensors)
masked_res.sum().backward()
tensor_res.sum().backward()
_compare_mt_t(masked_res, tensor_res)
for mt, t in zip(masked_tensors, data_tensors):
_compare_mt_t(mt.grad, t.grad, atol=1e-06)
def test_to_sparse(self, device):
for sample in _generate_sample_data(device=device):
data = sample.input
mask = sample.kwargs["mask"]
mt = masked_tensor(data.clone().detach(), mask, requires_grad=True)
sparse_mt = mt.to_sparse()
data.to_sparse().to_dense().sum().backward()
sparse_mt.to_dense().sum().backward()
_compare_mt_t(sparse_mt, data)
_compare_mt_t(mt.grad, data.grad)
def test_to_dense(self, device):
samples = _generate_sample_data(
device=device,
layout=torch.sparse_coo
) + _generate_sample_data(device=device, layout=torch.sparse_csr)
for sample in samples:
data = sample.input
mask = sample.kwargs["mask"]
mt = masked_tensor(data, mask, requires_grad=True)
dense_data = data.to_dense().detach().clone().requires_grad_(True)
dense_mt = mt.to_dense()
dense_data.sum().backward()
dense_mt.sum().backward()
_compare_mt_t(dense_mt, dense_data)
_compare_mt_t(mt.grad.to_dense(), dense_data.grad)
def test_to_dense_and_sparse_coo(self, device):
for sample in _generate_sample_data(device=device, layout=torch.strided):
data = sample.input
mask = sample.kwargs["mask"]
ms = mask.to_sparse_coo().coalesce()
mt = masked_tensor(data, mask, requires_grad=True)
mts = masked_tensor(data.sparse_mask(ms), ms, requires_grad=True)
converted = mt.to_sparse().to_dense()
converted.sum().backward()
converted2 = mts.to_dense()
converted2.sum().backward()
_compare_mts(converted, converted2)
_compare_mts(mt.grad, mts.grad.to_dense())
def test_to_dense_and_sparse_csr(self, device):
for sample in _generate_sample_data(device=device, layout=torch.strided):
data = sample.input
mask = sample.kwargs["mask"]
if data.ndim != 2:
continue
ms = mask.to_sparse_csr()
mt = masked_tensor(data, mask, requires_grad=True)
mts = masked_tensor(data.sparse_mask(ms), ms, requires_grad=True)
converted = mt.to_sparse_csr().to_dense()
converted.sum().backward()
converted2 = mts.to_dense()
converted2.sum().backward()
_compare_mts(converted, converted2)
_compare_mts(mt.grad, mts.grad.to_dense())
def test_invalid_sparse_layout(self, device):
data = torch.randn((3, 4), device=device).to_sparse_csc()
mask = _create_random_mask((3, 4), device=device).to_sparse_csc()
with self.assertRaisesRegex(TypeError, "data layout of torch.sparse_csc is not supported"):
masked_tensor(data, mask)
def test_invalid_sparse_coo_values(self, device):
v = torch.tensor([3, 4, 5], dtype=torch.float32)
i1 = torch.tensor([[0, 1, 1], [2, 0, 2]])
i2 = torch.tensor([[0, 1, 1], [2, 1, 2]])
t = torch.sparse_coo_tensor(i1, v, (2, 4), device=device)
mask = torch.sparse_coo_tensor(i2, torch.tensor([True, True, True]), (2, 4), device=device)
msg = "data and mask are both sparse COO tensors but do not have the same indices."
with self.assertRaisesRegex(ValueError, msg):
masked_tensor(t, mask)
def test_invalid_sparse_csr_values(self, device):
crow_indices1 = [0, 2, 3]
crow_indices2 = [0, 1, 3]
col_indices1 = [0, 1, 2]
col_indices2 = [1, 2, 3]
values = [2, 3, 4]
mask_values = [True, True, True]
t1 = torch.sparse_csr_tensor(
torch.tensor(crow_indices1, dtype=torch.int64),
torch.tensor(col_indices1, dtype=torch.int64),
torch.tensor(values),
size=(2, 4)
)
mask1 = torch.sparse_csr_tensor(
torch.tensor(crow_indices2, dtype=torch.int64),
torch.tensor(col_indices1, dtype=torch.int64),
torch.tensor(mask_values),
dtype=torch.bool,
size=(2, 4),
)
t2 = torch.sparse_csr_tensor(
torch.tensor(crow_indices2, dtype=torch.int64),
torch.tensor(col_indices1, dtype=torch.int64),
torch.tensor(values),
size=(2, 4),
)
mask2 = torch.sparse_csr_tensor(
torch.tensor(crow_indices2, dtype=torch.int64),
torch.tensor(col_indices2, dtype=torch.int64),
torch.tensor(mask_values),
dtype=torch.bool,
size=(2, 4),
)
msg = "data and mask are both sparse CSR tensors but do not share either crow or col indices."
with self.assertRaisesRegex(ValueError, msg):
masked_tensor(t1, mask1)
with self.assertRaisesRegex(ValueError, msg):
masked_tensor(t2, mask2)
def test_contiguous(self, device):
data = torch.randn((3, 3), device=device)
contiguous_data = data.clone()
mask1 = (contiguous_data > 0).bool()
not_contiguous_data = torch.as_strided(data.clone(), (2, 2), (1, 2))
mask2 = (not_contiguous_data > 0).bool()
contiguous_mt = masked_tensor(contiguous_data, mask1)
not_contiguous_mt = masked_tensor(not_contiguous_data, mask2)
contiguous_mt_sparse = masked_tensor(
contiguous_data.to_sparse_coo(), mask1.to_sparse_coo()
)
not_contiguous_mt_sparse = masked_tensor(
not_contiguous_data.to_sparse_coo(), mask2.to_sparse_coo()
)
self.assertEqual(contiguous_data.is_contiguous(), True)
self.assertEqual(not_contiguous_data.is_contiguous(), False)
self.assertEqual(contiguous_mt.is_contiguous(), True)
self.assertEqual(not_contiguous_mt.is_contiguous(), False)
error_msg = "MaskedTensors with sparse data do not have is_contiguous"
for t in [contiguous_mt_sparse, not_contiguous_mt_sparse]:
with self.assertRaisesRegex(ValueError, error_msg):
t.is_contiguous()
with self.assertRaisesRegex(ValueError, error_msg):
t.contiguous()
now_contiguous_mt = not_contiguous_mt.contiguous()
_compare_mts(not_contiguous_mt, now_contiguous_mt)
self.assertEqual(now_contiguous_mt.is_contiguous(), True)
self.assertEqual(now_contiguous_mt.get_data().is_contiguous(), True)
self.assertEqual(now_contiguous_mt.is_contiguous(), True)
class TestUnary(TestCase):
def _get_test_data(self, fn_name):
data = torch.randn(10, 10)
mask = torch.rand(10, 10) > 0.5
fn_name = _fix_fn_name(fn_name)
if fn_name in ["log", "log10", "log1p", "log2", "sqrt"]:
data = data.mul(0.5).abs()
if fn_name in ["rsqrt"]:
            data = data.abs() + 1  # Avoid division by zero
if fn_name in ["acos", "arccos", "asin", "arcsin", "logit"]:
data = data.abs().mul(0.5).clamp(0, 1)
if fn_name in ["atanh", "arctanh", "erfinv"]:
data = data.mul(0.5).clamp(-1, 1)
if fn_name in ["acosh", "arccosh"]:
data = data.abs() + 1
if fn_name in ["bitwise_not"]:
data = data.mul(128).to(torch.int8)
return data, mask
def _get_sample_kwargs(self, fn_name):
fn_name = _fix_fn_name(fn_name)
kwargs = {}
if fn_name in ["clamp", "clip"]:
kwargs["min"] = -0.5
kwargs["max"] = 0.5
return kwargs
def _get_sample_args(self, fn_name, data, mask):
fn_name = _fix_fn_name(fn_name)
mt = masked_tensor(data, mask)
t_args = [data]
mt_args = [mt]
if fn_name in ["pow"]:
t_args += [2.0]
mt_args += [2.0]
return t_args, mt_args
@parametrize("fn", NATIVE_UNARY_FNS)
def test_unary(self, fn):
torch.random.manual_seed(0)
fn_name = fn.__name__
data, mask = self._get_test_data(fn_name)
kwargs = self._get_sample_kwargs(fn_name)
t_args, mt_args = self._get_sample_args(fn_name, data, mask)
mt_result = fn(*mt_args, **kwargs)
t_result = fn(*t_args, **kwargs)
_compare_mt_t(mt_result, t_result)
@parametrize("fn", NATIVE_INPLACE_UNARY_FNS)
def test_inplace_unary(self, fn):
torch.random.manual_seed(0)
fn_name = fn.__name__
data, mask = self._get_test_data(fn_name)
kwargs = self._get_sample_kwargs(fn_name)
t_args, mt_args = self._get_sample_args(fn_name, data, mask)
mt_result = fn(*mt_args, **kwargs)
t_result = fn(*t_args, **kwargs)
_compare_mt_t(mt_result, t_result)
class TestBinary(TestCase):
def _get_test_data(self, fn_name):
fn_name = _fix_fn_name(fn_name)
data0 = torch.randn(10, 10)
data1 = torch.randn(10, 10)
mask = torch.rand(10, 10) > 0.5
if fn_name in ["bitwise_and", "bitwise_or", "bitwise_xor"]:
data0 = data0.mul(128).to(torch.int8)
data1 = data1.mul(128).to(torch.int8)
if fn_name in ["bitwise_left_shift", "bitwise_right_shift"]:
data0 = data0.abs().to(torch.int64)
data1 = data1.abs().to(torch.int64)
return data0, data1, mask
def _get_sample_kwargs(self, fn_name):
fn_name = _fix_fn_name(fn_name)
kwargs = {}
return kwargs
def _yield_sample_args(self, fn_name, data0, data1, mask):
""" Returns two sets of Tensor and MaskedTensor args for a binary function to compute.
Tensor args are all the same (just the two provided data tensors),
while the MaskedTensor args tests both (MaskedTensor, MaskedTensor) and (MaskedTensor, Tensor)
"""
fn_name = _fix_fn_name(fn_name)
mt0 = masked_tensor(data0, mask)
mt1 = masked_tensor(data1, mask)
t_args = [data0, data1]
mt_args = [mt0, mt1]
yield t_args, mt_args
t_args = [data0, data1]
mt_args = [mt0, data1]
yield t_args, mt_args
@parametrize("fn", NATIVE_BINARY_FNS)
def test_binary(self, fn):
torch.random.manual_seed(0)
fn_name = fn.__name__
data0, data1, mask = self._get_test_data(fn_name)
kwargs = self._get_sample_kwargs(fn_name)
for (t_args, mt_args) in self._yield_sample_args(fn_name, data0, data1, mask):
mt_result = fn(*mt_args, **kwargs)
t_result = fn(*t_args, **kwargs)
_compare_mt_t(mt_result, t_result)
@parametrize("fn", NATIVE_INPLACE_BINARY_FNS)
def test_inplace_binary(self, fn):
torch.random.manual_seed(0)
fn_name = fn.__name__
data0, data1, mask = self._get_test_data(fn_name)
kwargs = self._get_sample_kwargs(fn_name)
for (t_args, mt_args) in self._yield_sample_args(fn_name, data0, data1, mask):
mt_result = fn(*mt_args, **kwargs)
t_result = fn(*t_args, **kwargs)
_compare_mt_t(mt_result, t_result)
@parametrize("fn_name", ["add", "add_"])
def test_masks_match(self, fn_name):
torch.random.manual_seed(0)
fn = getattr(torch.ops.aten, fn_name)
data0, data1, mask = self._get_test_data(fn_name)
mask0 = mask
mask1 = torch.rand(mask.size()) > 0.5
mt0 = masked_tensor(data0, mask0)
mt1 = masked_tensor(data1, mask1)
try:
fn(mt0, mt1)
raise AssertionError
except ValueError as e:
assert (
"Input masks must match. If you need support for this, please open an issue on Github."
== str(e)
)
class TestReductions(TestCase):
def test_max_not_implemented(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m)
with self.assertRaisesRegex(TypeError, "torch._ops.aten.max.default"):
mt.max()
def test_sum(self):
d = torch.tensor([[0, 1, 2, 6], [3, 4, 5.0, 7]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(17.0), torch.tensor(True)), mt.sum())
_compare_mts(
masked_tensor(
torch.tensor([0.0, 4.0, 1.0, 13]),
torch.tensor([True, True, False, True]),
),
mt.sum(dim=0),
)
def test_sum_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.sum().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor(1.0).expand_as(m), m))
def test_mean(self):
d = torch.tensor([[0, 1, 3, 2], [3, 4, 1.0, 4]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(2.5), torch.tensor(True)), mt.mean())
_compare_mts(
masked_tensor(
torch.tensor([0.0, 4.0, 1.0, 3]),
torch.tensor([True, True, False, True]),
),
mt.mean(dim=0),
)
"""
    The following block of tests, "test_mean_grad_case_1[a through f]", is used to test the functionality of
    the two different ways of constructing MaskedTensors:
        masked_tensor(data, mask, requires_grad=True/False) -- NOT a differentiable constructor; the result is always a leaf
        as_masked_tensor(data, mask) -- differentiable constructor
    Like torch.tensor(data), masked_tensor(data, mask) will issue a UserWarning if data.requires_grad=True
as_masked_tensor does not take in requires_grad -- it just takes on the requires_grad from data
Therefore, there are 6 cases to test and we use `mean` as a proxy to test the different combinations
Assuming mt.mean().backward() is run after each constructor:
Case 1a:
values.requires_grad = True
mt = masked_tensor(values, mask, requires_grad=True)
yields
        - a UserWarning is raised because values.requires_grad=True
- values.grad = None
- mt.grad is a MaskedTensor with the correct gradient
Case 1b:
values.requires_grad = False
mt = masked_tensor(values, mask, requires_grad=True)
yields
- values.grad = None
- mt.grad is a MaskedTensor with the correct gradient
Case 2a/2b:
values.requires_grad = True/False
mt = masked_tensor(values, mask, requires_grad=False)
will both yield a RuntimeError of "element 0 of tensors does not require grad and does not have a grad_fn"
as expected. When values.requires_grad=True, we will also get a UserWarning
Case 3a:
values.requires_grad = True
mt = as_masked_tensor(values, mask)
yields
- values.grad is a MaskedTensor with the correct gradient
- mt.grad is None and gives a UserWarning that
"The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad"
Case 3b:
values.requires_grad = False
mt = as_masked_tensor(values, mask)
will yield a RuntimeError of "element 0 of tensors does not require grad and does not have a grad_fn"
as expected.
"""
def test_mean_grad_case_1a(self):
""" values.requires_grad = True
mt = masked_tensor(values, mask, requires_grad=True)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]], requires_grad=True)
m = torch.tensor([[True, False, False], [False, True, False]])
with self.assertWarnsRegex(UserWarning, "It is not recommended to create a MaskedTensor"):
mt = masked_tensor(d, m, requires_grad=True)
mt.mean().backward()
self.assertIsNone(d.grad)
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.5, 0, 0], [0, 0.5, 0]]), m))
def test_mean_grad_case_1b(self):
""" values.requires_grad = False
mt = masked_tensor(values, mask, requires_grad=True)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.mean().backward()
self.assertIsNone(d.grad)
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.5, 0, 0], [0, 0.5, 0]]), m))
def test_mean_grad_case_1c(self):
""" values.requires_grad = True
mt = masked_tensor(values, mask, requires_grad=False)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]], requires_grad=True)
m = torch.tensor([[True, False, False], [False, True, False]])
with self.assertWarnsRegex(UserWarning, "It is not recommended to create a MaskedTensor"):
mt = masked_tensor(d, m, requires_grad=False)
result = mt.mean()
msg = "element 0 of tensors does not require grad and does not have a grad_fn"
with self.assertRaisesRegex(RuntimeError, msg):
result.backward()
def test_mean_grad_case_1d(self):
""" values.requires_grad = False
mt = masked_tensor(values, mask, requires_grad=False)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=False)
result = mt.mean()
msg = "element 0 of tensors does not require grad and does not have a grad_fn"
with self.assertRaisesRegex(RuntimeError, msg):
result.backward()
def test_mean_grad_case_1e(self):
""" values.requires_grad = True
mt = as_masked_tensor(values, mask)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]], requires_grad=True)
m = torch.tensor([[True, False, False], [False, True, False]])
mt = as_masked_tensor(d, m)
mt.mean().backward()
_compare_mts(d.grad, masked_tensor(torch.tensor([[0.5, 0, 0], [0, 0.5, 0]]), m))
msg = "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad"
with self.assertWarnsRegex(UserWarning, msg):
self.assertIsNone(mt.grad)
def test_mean_grad_case_1f(self):
""" values.requires_grad = False
mt = as_masked_tensor(values, mask)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = as_masked_tensor(d, m)
result = mt.mean()
msg = "element 0 of tensors does not require grad and does not have a grad_fn"
with self.assertRaisesRegex(RuntimeError, msg):
result.backward()
def test_mean_dim_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, True, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.mean(1).sum().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.5, 0.5, 0], [0, 1, 0]]), m))
def test_amax(self):
d = torch.tensor([[0, 1, 3, -3], [3, -4, 1.0, 3]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(3.0), torch.tensor(True)), mt.amax())
_compare_mts(
masked_tensor(
torch.tensor([0.0, -4.0, 1.0, 3]),
torch.tensor([True, True, False, True]),
),
mt.amax(dim=0),
)
def test_amax_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.amax().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.0, 0, 0], [0, 1, 0]]), m))
def test_amin(self):
d = torch.tensor([[0, 1, 3, -3], [3, -4, 1.0, 3]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(-4.0), torch.tensor(True)), mt.amin())
_compare_mts(
masked_tensor(
torch.tensor([0.0, -4.0, 1.0, -3]),
torch.tensor([True, True, False, True]),
),
mt.amin(dim=0),
)
def test_amin_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.amin().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[1.0, 0, 0], [0, 0, 0]]), m))
def test_prod(self):
d = torch.tensor([[0, 1, 3, 0.0], [float("nan"), 4, 1.0, 5.0]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(0.0), torch.tensor(True)), mt.prod())
_compare_mts(
masked_tensor(
torch.tensor([0.0, 4.0, 1.0, 0.0]),
torch.tensor([True, True, False, True]),
),
mt.prod(dim=0),
)
def test_prod_grad(self):
d = torch.tensor([[2, float("nan"), 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.prod().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[4.0, 0, 0], [0, 2, 0]]), m))
def test_all(self):
d = torch.tensor([[True, True, False, False], [False, True, True, True]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(False), torch.tensor(True)), mt.all())
_compare_mts(
masked_tensor(
torch.tensor([True, True, True, False]),
torch.tensor([True, True, False, True]),
),
mt.all(dim=0),
)
m = torch.tensor([[True, False, True, False], [False, True, False, False]])
mt = masked_tensor(d, m)
_compare_mts(
masked_tensor(
torch.tensor([True, True, False, True]),
torch.tensor([True, True, True, False]),
),
mt.all(dim=0),
)
def test_grad_dtype(self):
d = torch.tensor([[True, True, False], [False, True, True]])
m = torch.tensor([[True, False, False], [False, True, False]])
msg = "Only Tensors of floating point and complex dtype can require gradients"
with self.assertRaisesRegex(RuntimeError, msg):
masked_tensor(d, m, requires_grad=True)
def test_any_true_dtype(self):
mt = torch.masked.MaskedTensor(
torch.rand(2, 2),
torch.rand(2, 2) > 0.5
)
msg = "expected a boolean tensor"
with self.assertRaisesRegex(ValueError, msg):
mt._is_any_true()
def test__is_any_true(self):
mt = torch.masked.MaskedTensor(
torch.tensor([[True, True, False], [False, False, True]]),
torch.tensor([[True, False, False], [False, True, False]]),
)
_compare_mts(
masked_tensor(torch.tensor(True), torch.tensor(True)),
mt._is_any_true(),
)
def test__is_any_true_false(self):
mt = torch.masked.MaskedTensor(
torch.tensor([[True, True, False], [False, False, True]]),
torch.tensor([[False, False, False], [False, False, False]]),
)
_compare_mts(
masked_tensor(torch.tensor(False), torch.tensor(True),),
mt._is_any_true(),
)
def test_backward(self):
# See https://github.com/pytorch/pytorch/issues/128557
with torch.autograd.detect_anomaly():
mt = torch.masked.MaskedTensor(
torch.rand(2, 2),
torch.rand(2, 2) > 0.5,
requires_grad=True
)
mt.sum().backward()
|
import torch
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_maskedtensor.py
|
test_softmax
|
def test_softmax(self, device):
data = torch.randn((3, 4), device=device) * 0.1
mask = torch.tensor(
[
[True, True, True, False],
[False, True, False, True],
[True, True, False, False],
],
device=device
)
mt = masked_tensor(data, mask, requires_grad=True)
masked_res = torch.softmax(mt, -1)
masked_res.sum().backward()
xinf = data.masked_fill(~mask, float("-inf")).detach().clone().requires_grad_()
tensor_res = torch.softmax(xinf, -1)
tensor_res.sum().backward()
_compare_mt_t(masked_res, tensor_res)
_compare_mt_t(mt.grad, xinf.grad, atol=1e-06)
|
def test_softmax(self, device):
data = torch.randn((3, 4), device=device) * 0.1
mask = torch.tensor(
[
[True, True, True, False],
[False, True, False, True],
[True, True, False, False],
],
device=device
)
_compare_forward_backward(data, mask, lambda t: torch.softmax(t, -1))
|
import torch
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestBasics(TestCase):
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestBasics(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_maskedtensor.py
|
test_unfold
|
def test_unfold(self, device):
data = torch.rand(5, 5, device=device)
mask = torch.rand(5, 5, device=device) > 0.5
_compare_forward_backward(data, mask, lambda t: t.unfold(1, 2, 2))
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestBasics(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_maskedtensor.py
|
test_nn_unfold
|
def test_nn_unfold(self, device):
data = torch.rand(2, 5, 3, 4, device=device)
mask = torch.rand(2, 5, 3, 4, device=device) > 0.5
_compare_forward_backward(data, mask, lambda t: torch.nn.functional.unfold(t, kernel_size=(2, 3)))
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestBasics(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_maskedtensor.py
|
test_stack
|
def test_stack(self, device):
masked_tensors = [
masked_tensor(
torch.rand(2, 5, 3, 4, device=device),
torch.rand(2, 5, 3, 4, device=device) > 0.5,
requires_grad=True,
) for _ in range(3)
]
data_tensors = [mt.get_data().detach().clone().requires_grad_() for mt in masked_tensors]
masked_res = torch.stack(masked_tensors)
tensor_res = torch.stack(data_tensors)
masked_res.sum().backward()
tensor_res.sum().backward()
_compare_mt_t(masked_res, tensor_res)
for mt, t in zip(masked_tensors, data_tensors):
_compare_mt_t(mt.grad, t.grad, atol=1e-06)
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestBasics(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
genf_float
|
def genf_float(x, y):
return torch.randn(x, y, dtype=dtype, device=device)
for (n, m, p) in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]:
if (dtype == torch.int32) or (dtype == torch.int64):
genf = genf_int
elif (dtype == torch.bfloat16):
genf = genf_bfloat
else:
genf = genf_float
_test_mm(n, m, p, dtype, genf)
|
def genf_float(x, y):
return torch.randn(x, y, dtype=dtype, device=device)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_maskedtensor.py
|
test_masks_match
|
def test_masks_match(self, fn_name):
torch.random.manual_seed(0)
fn = getattr(torch.ops.aten, fn_name)
data0, data1, mask = self._get_test_data(fn_name)
mask0 = mask
mask1 = torch.rand(mask.size()) > 0.5
mt0 = masked_tensor(data0, mask0)
mt1 = masked_tensor(data1, mask1)
try:
fn(mt0, mt1)
raise AssertionError()
except ValueError as e:
assert (
"Input masks must match. If you need support for this, please open an issue on Github."
== str(e)
)
|
def test_masks_match(self, fn_name):
torch.random.manual_seed(0)
fn = getattr(torch.ops.aten, fn_name)
data0, data1, mask = self._get_test_data(fn_name)
mask0 = mask
mask1 = torch.rand(mask.size()) > 0.5
mt0 = masked_tensor(data0, mask0)
mt1 = masked_tensor(data1, mask1)
try:
fn(mt0, mt1)
raise AssertionError
except ValueError as e:
assert (
"Input masks must match. If you need support for this, please open an issue on Github."
== str(e)
)
|
import torch
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestBinary(TestCase):
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestBinary(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_maskedtensor.py
|
test_max_not_implemented
|
def test_max_not_implemented(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m)
with self.assertRaisesRegex(TypeError, "no implementation found for 'torch._ops.aten.max.default'"):
mt.max()
|
def test_max_not_implemented(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m)
with self.assertRaisesRegex(TypeError, "torch._ops.aten.max.default"):
mt.max()
|
import torch
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestReductions(TestCase):
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestReductions(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_matmul_cuda.py
|
tensor_to_scale
|
def tensor_to_scale(x: torch.Tensor, float8_dtype: torch.dtype, dim=None):
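    # Compute the max absolute value (amax) of x, either over the whole tensor or along `dim`,
    # and convert it into an fp8 scale via the amax_to_scale helper (not shown in this excerpt).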
if dim is None:
amax = torch.max(torch.abs(x))
else:
amax = torch.max(torch.abs(x), dim=dim, keepdim=True).values
return amax_to_scale(amax, float8_dtype, x.dtype)
|
import unittest
from itertools import product
from functools import partial
from typing import Optional
import re
import torch
from torch.quantization._quantized_conversions import (
pack_int4_to_int8,
quantized_weight_reorder_for_mixed_dtypes_linear_cutlass,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import (
SM53OrLater,
SM90OrLater,
_get_torch_cuda_version,
PLATFORM_SUPPORTS_FP8
)
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
onlyCUDA,
tol as xtol,
toleranceOverride,
)
from torch.testing._internal.common_utils import (
IS_ARM64,
IS_JETSON,
IS_WINDOWS,
parametrize,
run_tests,
skipIfRocmVersionLessThan,
TEST_WITH_ROCM,
skipIfRocm,
TestCase,
)
_IS_SM8X = False
f8_msg = "FP8 is only supported on H100+ and sm_89 and MI300+ devices"
EPS = 1e-12
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_matmul_cuda.py
|
test_scaled_mm_vs_emulated_row_wise
|
def test_scaled_mm_vs_emulated_row_wise(self, base_dtype):
torch.manual_seed(42)
input_dtype = e4m3_type
output_dtype = base_dtype
x = torch.randn(16, 16, device="cuda", dtype=base_dtype)
y = torch.randn(32, 16, device="cuda", dtype=base_dtype).t()
x_scales = tensor_to_scale(x, input_dtype, dim=1).float()
y_scales = tensor_to_scale(y, input_dtype, dim=0).float()
x_fp8 = to_fp8_saturated(x * x_scales, e4m3_type)
y_fp8 = to_fp8_saturated(y * y_scales, e4m3_type)
# Calculate actual F8 mm
out_scaled_mm = mm_float8(
x_fp8, y_fp8, a_scale=x_scales, b_scale=y_scales, output_dtype=output_dtype
)
# Calculate emulated F8 mm
out_emulated = mm_float8_emulated(
x_fp8, x_scales, y_fp8, y_scales, output_dtype
)
if base_dtype in {torch.bfloat16, torch.float16}:
atol, rtol = 7e-2, 7e-2
else:
atol, rtol = 2e-3, 2e-3
torch.testing.assert_close(out_scaled_mm, out_emulated, atol=atol, rtol=rtol)
|
import unittest
from itertools import product
from functools import partial
from typing import Optional
import re
import torch
from torch.quantization._quantized_conversions import (
pack_int4_to_int8,
quantized_weight_reorder_for_mixed_dtypes_linear_cutlass,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import (
SM53OrLater,
SM90OrLater,
_get_torch_cuda_version,
PLATFORM_SUPPORTS_FP8
)
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
onlyCUDA,
tol as xtol,
toleranceOverride,
)
from torch.testing._internal.common_utils import (
IS_ARM64,
IS_JETSON,
IS_WINDOWS,
parametrize,
run_tests,
skipIfRocmVersionLessThan,
TEST_WITH_ROCM,
skipIfRocm,
TestCase,
)
_IS_SM8X = False
f8_msg = "FP8 is only supported on H100+ and sm_89 and MI300+ devices"
EPS = 1e-12
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not found")
class TestFP8MatmulCuda(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
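mm_float8 and mm_float8_emulated are defined elsewhere in test_matmul_cuda.py and do not appear in this record. A plausible sketch of the emulated reference path, assuming the convention above (x_fp8 was produced as x * x_scales, so dividing by the scale dequantizes):

import torch

def mm_float8_emulated_sketch(x_fp8, x_scales, y_fp8, y_scales, output_dtype):
    # Dequantize both operands to float32, do an ordinary matmul, then cast
    # to the requested output dtype; this is the slow reference that the
    # fused scaled-mm result gets compared against.
    x = x_fp8.float() / x_scales
    y = y_fp8.float() / y_scales
    return (x @ y).to(output_dtype)

With the row-wise x_scales of shape (16, 1) and column-wise y_scales of shape (1, 32) built in the test above, broadcasting applies the per-row and per-column scales automatically.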
torch
|
test/test_matmul_cuda.py
|
test_mixed_dtypes_linear
|
instantiate_device_type_tests(TestMatmulCuda, globals(), except_for="cpu")
if __name__ == '__main__':
run_tests()
|
def test_mixed_dtypes_linear(self, dtype: torch.dtype, device: str = "cuda"):
version = _get_torch_cuda_version()
if version < (11, 8):
self.skipTest("_mixed_dtypes_linear only compiled for CUDA 11.8+")
def run_test(
batch_shape,
m,
n,
k,
add_bias,
activation,
dtype,
dtypeq,
device,
rtol,
atol,
):
if not add_bias and activation != "none":
return
val_lo, val_hi = -1, 1
valq_lo, valq_hi = -2, 2
input = make_tensor(
*batch_shape, m, k, low=val_lo, high=val_hi, dtype=dtype, device=device
)
weight = make_tensor(
n, k, low=valq_lo, high=valq_hi, dtype=torch.int8, device=device
)
scale = make_tensor(
(n,), low=val_lo, high=val_hi, dtype=input.dtype, device=device
)
bias = (
make_tensor(
(n,), low=val_lo, high=val_hi, dtype=input.dtype, device=device
)
if add_bias
else None
)
input_ref = input.reshape(-1, input.shape[-1])
# First, test plain multiplication.
weight_ref = weight.T.to(input.dtype) * scale.view(1, n)
weightq = (
pack_int4_to_int8(weight.T) if dtypeq == torch.quint4x2 else weight.T
)
output_ref = torch.mm(input_ref, weight_ref).reshape(*input.shape[:-1], n)
output = torch.ops.aten._mixed_dtypes_linear(
input,
quantized_weight_reorder_for_mixed_dtypes_linear_cutlass(
weightq, dtypeq, transpose=False
),
scale,
)
torch.testing.assert_close(output, output_ref, rtol=rtol, atol=atol)
# Second, test the linear operator itself.
weight_ref = weight.to(input.dtype) * scale.view(n, 1)
weightq = pack_int4_to_int8(weight) if dtypeq == torch.quint4x2 else weight
bias_ref = bias.view(1, n) if add_bias else None
output_ref = torch.nn.functional.linear(
input_ref, weight_ref, bias=bias_ref
).reshape(*input.shape[:-1], n)
if activation == "relu":
relu = torch.nn.ReLU()
output_ref = relu(output_ref)
elif activation == "silu":
silu = torch.nn.SiLU()
output_ref = silu(output_ref)
output = torch.ops.aten._mixed_dtypes_linear(
input,
quantized_weight_reorder_for_mixed_dtypes_linear_cutlass(
weightq, dtypeq, transpose=True
),
scale,
bias=bias,
activation=activation,
)
torch.testing.assert_close(output, output_ref, rtol=rtol, atol=atol)
dtypeqs = [torch.int8, torch.quint4x2]
batch_shapes = [[], [2], [2, 1]]
shapes = [
[8, 64, 64],
[8, 64, 128],
[8, 128, 64],
[8, 128, 128],
[8, 128, 192],
[8, 128, 256],
[8, 256, 128],
[8, 256, 384],
[8, 384, 256],
]
activations = [None, "relu", "silu"]
rtol, atol = 1e-3, 1e-3
if dtype == torch.bfloat16:
rtol, atol = 1e-2, 1e-3
for dtypeq, batch_shape, (m, n, k), add_bias, activation in product(
dtypeqs, batch_shapes, shapes, (False, True), activations
):
run_test(
batch_shape,
m,
n,
k,
add_bias,
activation,
dtype,
dtypeq,
device,
rtol,
atol,
)
|
import unittest
from itertools import product
from functools import partial
from typing import Optional
import re
import torch
from torch.quantization._quantized_conversions import (
pack_int4_to_int8,
quantized_weight_reorder_for_mixed_dtypes_linear_cutlass,
)
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import (
SM53OrLater,
SM90OrLater,
_get_torch_cuda_version,
PLATFORM_SUPPORTS_FP8
)
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
onlyCUDA,
tol as xtol,
toleranceOverride,
)
from torch.testing._internal.common_utils import (
IS_ARM64,
IS_JETSON,
IS_WINDOWS,
parametrize,
run_tests,
skipIfRocmVersionLessThan,
TEST_WITH_ROCM,
skipIfRocm,
TestCase,
)
_IS_SM8X = False
f8_msg = "FP8 is only supported on H100+ and sm_89 and MI300+ devices"
EPS = 1e-12
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support CUTLASS")
@unittest.skipIf(IS_WINDOWS, "Windows doesn't support CUTLASS extensions")
@unittest.skipIf(not _IS_SM8X, "mixed dtypes linear only supported on SM 8.x")
class TestMixedDtypesLinearCuda(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
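For orientation, the reference path inside run_test above amounts to dequantizing the int8 weight with a per-output-channel scale and calling the ordinary linear. A standalone float32 sketch of that reference (the test itself runs in float16/bfloat16 on CUDA through the CUTLASS-backed _mixed_dtypes_linear op):

import torch

m, n, k = 8, 64, 64
x = torch.randn(m, k)
w_int8 = torch.randint(-2, 3, (n, k), dtype=torch.int8)
scale = torch.rand(n)

# Dequantize per output channel, then use the plain linear as the reference.
w_ref = w_int8.to(x.dtype) * scale.view(n, 1)
out_ref = torch.nn.functional.linear(x, w_ref)
print(out_ref.shape)  # torch.Size([8, 64])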
torch
|
test/test_linalg.py
|
test_single_det
|
def test_single_det(M, target, desc):
target_sdet, target_logabsdet = target
det = M.det()
logdet = M.logdet()
sdet, logabsdet = M.slogdet()
linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)
# Test det
self.assertEqual(det, target_sdet * target_logabsdet.exp(),
atol=1e-6, rtol=0, msg='{} (det)'.format(desc))
# Test slogdet
# Compare the overall value rather than individual parts because of
# precision issues when det is near zero.
self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
atol=1e-6, rtol=0, msg='{} (slogdet)'.format(desc))
self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
atol=1e-6, rtol=0, msg='{} (linalg_slogdet)'.format(desc))
# Test logdet
# Compare logdet against our own pytorch slogdet because they should
# be consistent, while it may behave slightly differently with other
# slogdet implementations when det is near zero due to precision
# issues.
if sdet.item() < 0:
self.assertTrue(logdet.item() != logdet.item(), '{} (logdet negative case)'.format(desc))
else:
self.assertEqual(logdet.exp(), target_logabsdet.exp(),
atol=1e-6, rtol=0, msg='{} (logdet non-negative case)'.format(desc))
eye = torch.eye(5, dtype=dtype, device=device)
test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
# Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)
for n in range(250, 551, 100):
mat = torch.randn(n, n, dtype=dtype, device=device)
q, _ = torch.qr(mat)
ref_det, ref_logabsdet = reference_slogdet(q)
test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')
|
def test_single_det(M, target, desc):
target_sdet, target_logabsdet = target
det = M.det()
logdet = M.logdet()
sdet, logabsdet = M.slogdet()
linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)
# Test det
self.assertEqual(det, target_sdet * target_logabsdet.exp(),
atol=1e-6, rtol=0, msg=f'{desc} (det)')
# Test slogdet
# Compare the overall value rather than individual parts because of
# precision issues when det is near zero.
self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
atol=1e-6, rtol=0, msg=f'{desc} (slogdet)')
self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
atol=1e-6, rtol=0, msg=f'{desc} (linalg_slogdet)')
# Test logdet
# Compare logdet against our own pytorch slogdet because they should
# be consistent, while it may behave slightly differently with other
# slogdet implementations when det is near zero due to precision
# issues.
if sdet.item() < 0:
self.assertTrue(logdet.item() != logdet.item(), f'{desc} (logdet negative case)')
else:
self.assertEqual(logdet.exp(), target_logabsdet.exp(),
atol=1e-6, rtol=0, msg=f'{desc} (logdet non-negative case)')
eye = torch.eye(5, dtype=dtype, device=device)
test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
# Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)
for n in range(250, 551, 100):
mat = torch.randn(n, n, dtype=dtype, device=device)
q, _ = torch.qr(mat)
ref_det, ref_logabsdet = reference_slogdet(q)
test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
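The assertions in both versions of test_single_det revolve around the identity det(M) = sign * exp(logabsdet); a compact standalone check of that relationship, outside the test harness:

import torch

M = torch.randn(5, 5, dtype=torch.float64)
sign, logabsdet = torch.linalg.slogdet(M)
# det should match sign * exp(logabsdet) up to floating-point error.
torch.testing.assert_close(torch.linalg.det(M), sign * logabsdet.exp())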
torch
|
test/test_linalg.py
|
test
|
def test(n=10, # how many tests to generate
n_labels=5, # how many labels available
min_ops=1, max_ops=4, # min and max number of operands per test
min_dims=1, max_dims=3, # min and max number of dimensions per operand
min_size=1, max_size=8, # min and max size of each dimension
max_out_dim=3, # max number of dimensions for the output
enable_diagonals=True, # controls if labels can be repeated for diagonals
ellipsis_prob=0.5, # probability of including ellipsis in operand
broadcasting_prob=0.1): # probability of turning some dim sizes 1 for broadcasting
all_labels = torch.arange(52)
assert 0 <= n
assert 0 <= n_labels < len(all_labels)
assert 0 < min_ops <= max_ops
assert 0 <= min_dims <= max_dims
assert 0 <= min_size <= max_size
assert 0 <= max_out_dim
assert enable_diagonals or max_dims <= n_labels
for _ in range(n):
# Select a subset of labels for this test and give them random sizes
possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))
operands = []
sublists = []
ell_size = 0
valid_labels = set()
# create random input operands
for _ in range(random.randint(min_ops, max_ops)):
n_dim = random.randint(min_dims, max_dims)
labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
labels = possible_labels[labels_idx]
valid_labels.update(labels.tolist())
shape = labels_size[labels]
# turn some dimensions to size 1 for testing broadcasting
mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
broadcast_labels = torch.unique(labels[mask == 1])
shape[(labels[..., None] == broadcast_labels).any(-1)] = 1
labels = labels.tolist()
shape = shape.tolist()
# include ellipsis if not all dimensions were assigned a label already
if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
ell_num_dim = random.randint(1, max_dims - n_dim)
ell_size = max(ell_size, ell_num_dim)
ell_shape = ellipsis_shape[-ell_num_dim:]
# again, turn some dimensions to size 1 for broadcasting
mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
ell_shape[mask == 1] = 1
ell_index = random.randint(0, n_dim)
shape[ell_index:ell_index] = ell_shape
labels.insert(ell_index, ...)
operands.append(make_tensor(shape, dtype=dtype, device=device))
sublists.append(labels)
# NumPy has a bug with the sublist format so for now we compare PyTorch sublist
# implementation against the equation format implementation of NumPy
# see https://github.com/numpy/numpy/issues/10926
np_operands = [op.cpu().numpy() for op in operands]
# test equation format
equation = ','.join(convert_sublist(l) for l in sublists)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format
args = [*itertools.chain(*zip(operands, sublists))]
self._check_einsum(*args, np_args=(equation, *np_operands))
# generate an explicit output
out_sublist = []
num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
if num_out_labels > 0:
out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
out_sublist.insert(random.randint(0, num_out_labels), ...)
# test equation format with explicit output
equation += '->' + convert_sublist(out_sublist)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format with explicit output
args.append(out_sublist)
self._check_einsum(*args, np_args=(equation, *np_operands))
test(500)
|
def test(n=10, # how many tests to generate
n_labels=5, # how many labels available
min_ops=1, max_ops=4, # min and max number of operands per test
min_dims=1, max_dims=3, # min and max number of dimensions per operand
min_size=1, max_size=8, # min and max size of each dimension
max_out_dim=3, # max number of dimensions for the output
enable_diagonals=True, # controls if labels can be repeated for diagonals
ellipsis_prob=0.5, # probability of including ellipsis in operand
broadcasting_prob=0.1): # probability of turning some dim sizes 1 for broadcasting
all_labels = torch.arange(52)
assert 0 <= n
assert 0 <= n_labels < len(all_labels)
assert 0 < min_ops <= max_ops
assert 0 <= min_dims <= max_dims
assert 0 <= min_size <= max_size
assert 0 <= max_out_dim
assert enable_diagonals or max_dims <= n_labels
for _ in range(n):
# Select a subset of labels for this test and give them random sizes
possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))
operands = []
sublists = []
ell_size = 0
valid_labels = set()
# create random input operands
for _ in range(random.randint(min_ops, max_ops)):
n_dim = random.randint(min_dims, max_dims)
labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
labels = possible_labels[labels_idx]
valid_labels.update(labels.tolist())
shape = labels_size[labels]
# turn some dimensions to size 1 for testing broadcasting
mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
broadcast_labels = torch.unique(labels[mask == 1])
shape[(labels[..., None] == broadcast_labels).any(-1)] = 1
labels = labels.tolist()
shape = shape.tolist()
# include ellipsis if not all dimensions were assigned a label already
if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
ell_num_dim = random.randint(1, max_dims - n_dim)
ell_size = max(ell_size, ell_num_dim)
ell_shape = ellipsis_shape[-ell_num_dim:]
# again, turn some dimensions to size 1 for broadcasting
mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
ell_shape[mask == 1] = 1
ell_index = random.randint(0, n_dim)
shape[ell_index:ell_index] = ell_shape
labels.insert(ell_index, ...)
operands.append(make_tensor(shape, dtype=dtype, device=device))
sublists.append(labels)
# NumPy has a bug with the sublist format so for now we compare PyTorch sublist
# implementation against the equation format implementation of NumPy
# see https://github.com/numpy/numpy/issues/10926
np_operands = [op.cpu().numpy() for op in operands]
# test equation format
equation = ','.join(convert_sublist(l) for l in sublists)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format
args = list(itertools.chain.from_iterable(zip(operands, sublists)))
self._check_einsum(*args, np_args=(equation, *np_operands))
# generate an explicit output
out_sublist = []
num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
if num_out_labels > 0:
out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
out_sublist.insert(random.randint(0, num_out_labels), ...)
# test equation format with explicit output
equation += '->' + convert_sublist(out_sublist)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format with explicit output
args.append(out_sublist)
self._check_einsum(*args, np_args=(equation, *np_operands))
test(500)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
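The generator above checks that torch.einsum produces the same result in its equation-string form and its interleaved sublist form; a minimal fixed-shape illustration of the equivalence it exercises:

import torch

a = torch.randn(3, 4)
b = torch.randn(4, 5)

out_equation = torch.einsum('ij,jk->ik', a, b)
# Sublist format: operands interleaved with integer label lists,
# with the output label list last.
out_sublist = torch.einsum(a, [0, 1], b, [1, 2], [0, 2])
torch.testing.assert_close(out_equation, out_sublist)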
torch
|
test/test_maskedtensor.py
|
test_backward
|
def test_backward(self):
# See https://github.com/pytorch/pytorch/issues/128557
with torch.autograd.detect_anomaly():
mt = torch.masked.MaskedTensor(
torch.rand(2, 2),
torch.rand(2, 2) > 0.5,
requires_grad=True
)
mt.sum().backward()
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestReductions(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_maskedtensor.py
|
is_reduction
|
def is_reduction(op):
return op.name in REDUCE_NAMES and op.name not in {"all", "mean", "std", "var"}
mt_unary_ufuncs = [op for op in unary_ufuncs if is_unary(op)]
mt_binary_ufuncs = [op for op in binary_ufuncs if is_binary(op)]
mt_reduction_ufuncs = [op for op in reduction_ops if is_reduction(op)]
MASKEDTENSOR_FLOAT_TYPES = {
torch.float16,
torch.float32,
torch.float64,
}
class TestOperators(TestCase):
def _convert_mt_args(self, args, mask, layout):
return [
masked_tensor(
arg.sparse_mask(mask) if layout != torch.strided else arg, mask
)
if torch.is_tensor(arg)
else arg
for arg in args
]
def _test_unary_binary_equality(self, device, dtype, op, layout=torch.strided):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
input = sample.input
sample_args, sample_kwargs = sample.args, sample.kwargs
mask = (
_make_tensor_mask(input.shape, device)
if "mask" not in sample_kwargs
else sample_kwargs.pop("mask")
)
if layout == torch.sparse_coo:
mask = mask.to_sparse_coo().coalesce()
input = input.sparse_mask(mask)
elif layout == torch.sparse_csr:
if input.ndim != 2 or mask.ndim != 2:
continue
mask = mask.to_sparse_csr()
input = input.sparse_mask(mask)
# Binary operations currently only support same size masks
if is_binary(op):
if input.shape != sample_args[0].shape:
continue
# Binary operations also don't support kwargs right now
else:
sample_kwargs = {}
mt = masked_tensor(input, mask)
mt_args = self._convert_mt_args(sample_args, mask, layout)
mt_result = op(mt, *mt_args, **sample_kwargs)
t_result = op(sample.input, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result, t_result)
# If the operation is binary, check that lhs = masked, rhs = regular tensor also works
if is_binary(op) and layout == torch.strided:
mt_result2 = op(mt, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result2, t_result)
def _test_reduction_equality(self, device, dtype, op, layout=torch.strided):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
input = sample.input
# Reduction operations don't support more advanced args/kwargs right now
sample_args, sample_kwargs = (), {}
if input.dim() == 0 or input.numel() == 0:
continue
mask = _make_tensor_mask(input.shape, device)
if torch.count_nonzero(mask) == 0:
continue
tensor_input = _combine_input_and_mask(op.op, input, mask)
if layout == torch.sparse_coo:
mask = mask.to_sparse_coo().coalesce()
input = input.sparse_mask(mask)
elif layout == torch.sparse_csr:
if input.ndim != 2 or mask.ndim != 2:
continue
mask = mask.to_sparse_csr()
input = input.sparse_mask(mask)
mt = masked_tensor(input, mask)
mt_args = self._convert_mt_args(sample_args, mask, layout)
mt_result = op(mt, *mt_args, **sample_kwargs)
t_result = op(tensor_input, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result, t_result)
@ops(mt_unary_ufuncs, allowed_dtypes=MASKEDTENSOR_FLOAT_TYPES) # type: ignore[arg-type]
@parametrize("layout", [torch.strided, torch.sparse_coo, torch.sparse_csr])
def test_unary_core(self, device, dtype, op, layout):
# Skip tests that don't have len(kwargs) == 0
skip_variants = {
"decimals_0",
"decimals_3",
"decimals_neg_3",
}
if op.name == "round" and op.variant_test_name in skip_variants:
return
self._test_unary_binary_equality(device, dtype, op)
@ops(mt_binary_ufuncs, allowed_dtypes=MASKEDTENSOR_FLOAT_TYPES) # type: ignore[arg-type]
@parametrize("layout", [torch.strided, torch.sparse_coo, torch.sparse_csr])
def test_binary_core(self, device, dtype, op, layout):
self._test_unary_binary_equality(device, dtype, op, layout)
@ops(mt_reduction_ufuncs, allowed_dtypes=MASKEDTENSOR_FLOAT_TYPES) # type: ignore[arg-type]
@parametrize("layout", [torch.strided, torch.sparse_coo, torch.sparse_csr])
def test_reduction_all(self, device, dtype, op, layout):
# argmin and argmax are not currently supported for torch.sparse_csr
if op.name in {"argmin", "argmax"} and layout == torch.sparse_csr:
return
self._test_reduction_equality(device, dtype, op, layout)
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestOperators, globals(), only_for=only_for)
instantiate_device_type_tests(TestBasics, globals(), only_for=only_for)
instantiate_parametrized_tests(TestUnary)
instantiate_parametrized_tests(TestBinary)
instantiate_parametrized_tests(TestReductions)
if __name__ == '__main__':
run_tests()
|
def is_reduction(op):
return op.name in REDUCE_NAMES and op.name not in {"all", "mean", "std", "var"}
mt_unary_ufuncs = [op for op in unary_ufuncs if is_unary(op)]
mt_binary_ufuncs = [op for op in binary_ufuncs if is_binary(op)]
mt_reduction_ufuncs = [op for op in reduction_ops if is_reduction(op)]
MASKEDTENSOR_FLOAT_TYPES = {
torch.float16,
torch.float32,
torch.float64,
}
class TestOperators(TestCase):
def _convert_mt_args(self, args, mask, layout):
return [
masked_tensor(
arg.sparse_mask(mask) if layout != torch.strided else arg, mask
)
if torch.is_tensor(arg)
else arg
for arg in args
]
def _test_unary_binary_equality(self, device, dtype, op, layout=torch.strided):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
input = sample.input
sample_args, sample_kwargs = sample.args, sample.kwargs
mask = (
_create_random_mask(input.shape, device)
if "mask" not in sample_kwargs
else sample_kwargs.pop("mask")
)
if layout == torch.sparse_coo:
mask = mask.to_sparse_coo().coalesce()
input = input.sparse_mask(mask)
elif layout == torch.sparse_csr:
if input.ndim != 2 or mask.ndim != 2:
continue
mask = mask.to_sparse_csr()
input = input.sparse_mask(mask)
# Binary operations currently only support same size masks
if is_binary(op):
if input.shape != sample_args[0].shape:
continue
# Binary operations also don't support kwargs right now
else:
sample_kwargs = {}
mt = masked_tensor(input, mask)
mt_args = self._convert_mt_args(sample_args, mask, layout)
mt_result = op(mt, *mt_args, **sample_kwargs)
t_result = op(sample.input, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result, t_result)
# If the operation is binary, check that lhs = masked, rhs = regular tensor also works
if is_binary(op) and layout == torch.strided:
mt_result2 = op(mt, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result2, t_result)
def _test_reduction_equality(self, device, dtype, op, layout=torch.strided):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
input = sample.input
# Reduction operations don't support more advanced args/kwargs right now
sample_args, sample_kwargs = (), {}
if input.dim() == 0 or input.numel() == 0:
continue
mask = _create_random_mask(input.shape, device)
if torch.count_nonzero(mask) == 0:
continue
tensor_input = _combine_input_and_mask(op.op, input, mask)
if layout == torch.sparse_coo:
mask = mask.to_sparse_coo().coalesce()
input = input.sparse_mask(mask)
elif layout == torch.sparse_csr:
if input.ndim != 2 or mask.ndim != 2:
continue
mask = mask.to_sparse_csr()
input = input.sparse_mask(mask)
mt = masked_tensor(input, mask)
mt_args = self._convert_mt_args(sample_args, mask, layout)
mt_result = op(mt, *mt_args, **sample_kwargs)
t_result = op(tensor_input, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result, t_result)
@ops(mt_unary_ufuncs, allowed_dtypes=MASKEDTENSOR_FLOAT_TYPES) # type: ignore[arg-type]
@parametrize("layout", [torch.strided, torch.sparse_coo, torch.sparse_csr])
def test_unary_core(self, device, dtype, op, layout):
# Skip tests that don't have len(kwargs) == 0
skip_variants = {
"decimals_0",
"decimals_3",
"decimals_neg_3",
}
if op.name == "round" and op.variant_test_name in skip_variants:
return
self._test_unary_binary_equality(device, dtype, op)
@ops(mt_binary_ufuncs, allowed_dtypes=MASKEDTENSOR_FLOAT_TYPES) # type: ignore[arg-type]
@parametrize("layout", [torch.strided, torch.sparse_coo, torch.sparse_csr])
# FIXME:
# Result is just wrong; production logic should be fixed
@decorateIf(
unittest.expectedFailure,
lambda params: (
params["op"].name == "add" and
params["dtype"] in [torch.float16, torch.float32] and
params["device"] == "cpu" and
params["layout"] == torch.sparse_csr
)
)
# Result is just wrong; production logic should be fixed
@decorateIf(
unittest.expectedFailure,
lambda params: (
params["op"].name == "sub" and
params["dtype"] in [torch.float16, torch.float32] and
params["device"] == "cpu" and
params["layout"] == torch.sparse_csr
)
)
# Result is just wrong; production logic should be fixed
@decorateIf(
unittest.expectedFailure,
lambda params: (
params["op"].name == "eq" and
params["dtype"] == torch.float64 and
params["device"] == "cpu" and
params["layout"] == torch.sparse_csr
)
)
def test_binary_core(self, device, dtype, op, layout):
self._test_unary_binary_equality(device, dtype, op, layout)
@ops(mt_reduction_ufuncs, allowed_dtypes=MASKEDTENSOR_FLOAT_TYPES) # type: ignore[arg-type]
@parametrize("layout", [torch.strided, torch.sparse_coo, torch.sparse_csr])
def test_reduction_all(self, device, dtype, op, layout):
# argmin and argmax are not currently supported for torch.sparse_csr
if op.name in {"argmin", "argmax"} and layout == torch.sparse_csr:
return
self._test_reduction_equality(device, dtype, op, layout)
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestOperators, globals(), only_for=only_for)
instantiate_device_type_tests(TestBasics, globals(), only_for=only_for)
instantiate_parametrized_tests(TestUnary)
instantiate_parametrized_tests(TestBinary)
instantiate_parametrized_tests(TestReductions)
if __name__ == '__main__':
run_tests()
|
import torch
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
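_compare_mt_t and _create_random_mask are helpers defined elsewhere in test_maskedtensor.py and are not reproduced in this record. The core pattern the tests follow (run an op on a MaskedTensor and on the dense data, then compare only where the mask is True) can be sketched roughly as follows; MaskedTensor is a prototype API, so treat this as an assumption-laden illustration rather than the file's actual helpers:

import torch
from torch.masked import masked_tensor

data = torch.randn(2, 3)
mask = torch.rand(2, 3) > 0.5

mt = masked_tensor(data, mask)
mt_out = torch.abs(mt)   # unary op dispatched through MaskedTensor
t_out = torch.abs(data)  # same op on the plain tensor

# Only entries where the mask is True are meaningful; masked-out slots
# of the MaskedTensor's data are unspecified.
assert torch.equal(mt_out.get_data()[mask], t_out[mask])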
torch
|
test/test_maskedtensor.py
|
_test_unary_binary_equality
|
def _test_unary_binary_equality(self, device, dtype, op, layout=torch.strided):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
input = sample.input
sample_args, sample_kwargs = sample.args, sample.kwargs
mask = (
_make_tensor_mask(input.shape, device)
if "mask" not in sample_kwargs
else sample_kwargs.pop("mask")
)
if layout == torch.sparse_coo:
mask = mask.to_sparse_coo().coalesce()
input = input.sparse_mask(mask)
elif layout == torch.sparse_csr:
if input.ndim != 2 or mask.ndim != 2:
continue
mask = mask.to_sparse_csr()
input = input.sparse_mask(mask)
# Binary operations currently only support same size masks
if is_binary(op):
if input.shape != sample_args[0].shape:
continue
# Binary operations also don't support kwargs right now
else:
sample_kwargs = {}
mt = masked_tensor(input, mask)
mt_args = self._convert_mt_args(sample_args, mask, layout)
mt_result = op(mt, *mt_args, **sample_kwargs)
t_result = op(sample.input, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result, t_result)
# If the operation is binary, check that lhs = masked, rhs = regular tensor also works
if is_binary(op) and layout == torch.strided:
mt_result2 = op(mt, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result2, t_result)
|
def _test_unary_binary_equality(self, device, dtype, op, layout=torch.strided):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
input = sample.input
sample_args, sample_kwargs = sample.args, sample.kwargs
mask = (
_create_random_mask(input.shape, device)
if "mask" not in sample_kwargs
else sample_kwargs.pop("mask")
)
if layout == torch.sparse_coo:
mask = mask.to_sparse_coo().coalesce()
input = input.sparse_mask(mask)
elif layout == torch.sparse_csr:
if input.ndim != 2 or mask.ndim != 2:
continue
mask = mask.to_sparse_csr()
input = input.sparse_mask(mask)
# Binary operations currently only support same size masks
if is_binary(op):
if input.shape != sample_args[0].shape:
continue
# Binary operations also don't support kwargs right now
else:
sample_kwargs = {}
mt = masked_tensor(input, mask)
mt_args = self._convert_mt_args(sample_args, mask, layout)
mt_result = op(mt, *mt_args, **sample_kwargs)
t_result = op(sample.input, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result, t_result)
# If the operation is binary, check that lhs = masked, rhs = regular tensor also works
if is_binary(op) and layout == torch.strided:
mt_result2 = op(mt, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result2, t_result)
|
import torch
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
mt_unary_ufuncs = [op for op in unary_ufuncs if is_unary(op)]
mt_binary_ufuncs = [op for op in binary_ufuncs if is_binary(op)]
mt_reduction_ufuncs = [op for op in reduction_ops if is_reduction(op)]
MASKEDTENSOR_FLOAT_TYPES = {
torch.float16,
torch.float32,
torch.float64,
}
class TestOperators(TestCase):
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
mt_unary_ufuncs = [op for op in unary_ufuncs if is_unary(op)]
mt_binary_ufuncs = [op for op in binary_ufuncs if is_binary(op)]
mt_reduction_ufuncs = [op for op in reduction_ops if is_reduction(op)]
MASKEDTENSOR_FLOAT_TYPES = {
torch.float16,
torch.float32,
torch.float64,
}
class TestOperators(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_maskedtensor.py
|
test_any_true_dtype
|
def test_any_true_dtype(self):
mt = torch.masked.MaskedTensor(
torch.rand(2, 2),
torch.rand(2, 2) > 0.5
)
msg = "expected a boolean tensor"
with self.assertRaisesRegex(ValueError, msg):
mt._is_any_true()
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestReductions(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_maskedtensor.py
|
test__is_any_true
|
def test__is_any_true(self):
mt = torch.masked.MaskedTensor(
torch.tensor([[True, True, False], [False, False, True]]),
torch.tensor([[True, False, False], [False, True, False]]),
)
_compare_mts(
masked_tensor(torch.tensor(True), torch.tensor(True)),
mt._is_any_true(),
)
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestReductions(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_maskedtensor.py
|
test__is_any_true_false
|
def test__is_any_true_false(self):
mt = torch.masked.MaskedTensor(
torch.tensor([[True, True, False], [False, False, True]]),
torch.tensor([[False, False, False], [False, False, False]]),
)
_compare_mts(
masked_tensor(torch.tensor(False), torch.tensor(True),),
mt._is_any_true(),
)
|
import torch
import unittest
from torch.testing._internal.common_utils import (
decorateIf,
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
class TestReductions(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
run_subtest
|
def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
assert batches == ()
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
q = min(*size)
u, s, v = svd_lowrank(a_input, q=q, **options)
# check if u, s, v is a SVD
u, s, v = u[..., :q], s[..., :q], v[..., :q]
A = u.matmul(s.diag_embed()).matmul(v.mT)
self.assertEqual(A, a, rtol=1e-7, atol=2e-7)
# check if svd_lowrank produces same singular values as torch.svd
U, S, V = torch.svd(a)
self.assertEqual(s.shape, S.shape)
self.assertEqual(u.shape, U.shape)
self.assertEqual(v.shape, V.shape)
self.assertEqual(s, S)
if density == 1:
# actual_rank is known only for dense inputs
#
# check if pairs (u, U) and (v, V) span the same
# subspaces, respectively
u, s, v = u[..., :actual_rank], s[..., :actual_rank], v[..., :actual_rank]
U, S, V = U[..., :actual_rank], S[..., :actual_rank], V[..., :actual_rank]
self.assertEqual(u.mT.matmul(U).det().abs(), torch.ones(batches, device=device, dtype=dtype))
self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
(10, (100, 40), all_batches),
(7, (1000, 1000), [()]),
]:
# dense input
for batches in all_batches:
run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
if size != size[::-1]:
run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)
# sparse input
for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
for density in [0.005, 0.1]:
run_subtest(None, size, (), device, torch.svd_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.svd_lowrank)
actual_rank, size, batches = 2, (17, 4), ()
run_subtest(actual_rank, size, batches, device, jitted)
|
def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
assert batches == ()
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
q = min(*size)
u, s, v = svd_lowrank(a_input, q=q, **options)
# check if u, s, v is a SVD
u, s, v = u[..., :q], s[..., :q], v[..., :q]
A = (u * s.unsqueeze(-2)).matmul(v.mH)
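            # scaling the columns of u by s is equivalent to u @ diag(s),
            # so A reconstructs u diag(s) v^H without building the diagonal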
self.assertEqual(A, a, rtol=1e-7, atol=2e-7)
# check if svd_lowrank produces same singular values as linalg.svdvals
U, S, Vh = torch.linalg.svd(a, full_matrices=False)
V = Vh.mH
self.assertEqual(s, S)
if density == 1:
# actual_rank is known only for dense inputs
#
# check if pairs (u, U) and (v, V) span the same
# subspaces, respectively
u, v = u[..., :actual_rank], v[..., :actual_rank]
U, V = U[..., :actual_rank], V[..., :actual_rank]
expected_ones = u.mH.matmul(U).det().abs()
self.assertEqual(expected_ones, torch.ones_like(expected_ones))
self.assertEqual(v.mH.matmul(V).det().abs(), torch.ones_like(expected_ones))
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [ # noqa: B020
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
(10, (100, 40), all_batches),
(7, (1000, 1000), [()]),
]:
# dense input
for batches in all_batches:
run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
if size != size[::-1]:
run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)
# sparse input
for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
for density in [0.005, 0.1]:
run_subtest(None, size, (), device, torch.svd_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.svd_lowrank)
actual_rank, size, batches = 2, (17, 4), ()
run_subtest(actual_rank, size, batches, device, jitted)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
test_view_mutate
|
def test_view_mutate(self):
x = torch.zeros(4)
y = x.view(2, 2)
to_meta = MetaConverter()
m = to_meta(y)
y.add_(torch.randn(2, 2, requires_grad=True))
m.add_(torch.randn(2, 2, device='meta', requires_grad=True))
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
class TestMetaConverter(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
test_preferred_blas_library
|
def test_preferred_blas_library(self):
# The main purpose of this test is to make sure these "backend" calls work normally without raising exceptions.
m1 = torch.randint(2, 5, (2048, 2400), device='cuda', dtype=torch.float)
m2 = torch.randint(2, 5, (128, 2400), device='cuda', dtype=torch.float)
torch.backends.cuda.preferred_blas_library('cublaslt')
out1 = torch.nn.functional.linear(m1, m2)
torch.backends.cuda.preferred_blas_library('cublas')
out2 = torch.nn.functional.linear(m1, m2)
# Although blas preferred flags doesn't affect CPU currently,
# we set this to make sure the flag can switch back to default normally.
out_ref = torch.nn.functional.linear(m1.cpu(), m2.cpu())
self.assertEqual(out1, out2)
self.assertEqual(out_ref, out2.cpu())
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
class TestLinalg(TestCase):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
test
|
def test(n=10, # how many tests to generate
n_labels=5, # how many labels available
min_ops=1, max_ops=4, # min and max number of operands per test
min_dims=1, max_dims=3, # min and max number of dimensions per operand
min_size=1, max_size=8, # min and max size of each dimension
max_out_dim=3, # max number of dimensions for the output
enable_diagonals=True, # controls if labels can be repeated for diagonals
ellipsis_prob=0.5, # probability of including ellipsis in operand
broadcasting_prob=0.1): # probability of turning some dim sizes 1 for broadcasting
all_labels = torch.arange(52)
assert 0 <= n
assert 0 <= n_labels < len(all_labels)
assert 0 < min_ops <= max_ops
assert 0 <= min_dims <= max_dims
assert 0 <= min_size <= max_size
assert 0 <= max_out_dim
assert enable_diagonals or max_dims <= n_labels
for _ in range(n):
# Select a subset of labels for this test and give them random sizes
possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))
operands = []
sublists = []
ell_size = 0
valid_labels = set()
# create random input operands
for _ in range(random.randint(min_ops, max_ops)):
n_dim = random.randint(min_dims, max_dims)
labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
labels = possible_labels[labels_idx]
valid_labels.update(labels.tolist())
shape = labels_size[labels]
# turn some dimensions to size 1 for testing broadcasting
mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
broadcast_labels = torch.unique(labels[mask == 1])
shape[(labels[..., None] == broadcast_labels).any(-1)] = 1
labels = labels.tolist()
shape = shape.tolist()
# include ellipsis if not all dimensions were assigned a label already
if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
ell_num_dim = random.randint(1, max_dims - n_dim)
ell_size = max(ell_size, ell_num_dim)
ell_shape = ellipsis_shape[-ell_num_dim:]
# again, turn some dimensions to size 1 for broadcasting
mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
ell_shape[mask == 1] = 1
ell_index = random.randint(0, n_dim)
shape[ell_index:ell_index] = ell_shape
labels.insert(ell_index, ...)
operands.append(make_tensor(shape, dtype=dtype, device=device))
sublists.append(labels)
# NumPy has a bug with the sublist format so for now we compare PyTorch sublist
# implementation against the equation format implementation of NumPy
# see https://github.com/numpy/numpy/issues/10926
np_operands = [op.cpu().numpy() for op in operands]
# test equation format
equation = ','.join(convert_sublist(l) for l in sublists)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format
args = [*itertools.chain(*zip(operands, sublists))]
self._check_einsum(*args, np_args=(equation, *np_operands))
# generate an explicit output
out_sublist = []
num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
if num_out_labels > 0:
out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
out_sublist.insert(random.randint(0, num_out_labels), ...)
# test equation format with explicit output
equation += '->' + convert_sublist(out_sublist)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format with explicit output
args.append(out_sublist)
self._check_einsum(*args, np_args=(equation, *np_operands))
test(500)
|
def test(n=10, # how many tests to generate
n_labels=5, # how many labels available
min_ops=1, max_ops=4, # min and max number of operands per test
min_dims=1, max_dims=3, # min and max number of dimensions per operand
min_size=1, max_size=8, # min and max size of each dimension
max_out_dim=3, # max number of dimensions for the output
enable_diagonals=True, # controls if labels can be repeated for diagonals
ellipsis_prob=0.5, # probability of including ellipsis in operand
broadcasting_prob=0.1): # probability of turning some dim sizes 1 for broadcasting
all_labels = torch.arange(52)
assert 0 <= n
assert 0 <= n_labels < len(all_labels)
assert 0 < min_ops <= max_ops
assert 0 <= min_dims <= max_dims
assert 0 <= min_size <= max_size
assert 0 <= max_out_dim
assert enable_diagonals or max_dims <= n_labels
for _ in range(n):
# Select a subset of labels for this test and give them random sizes
possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))
operands = []
sublists = []
ell_size = 0
valid_labels = set()
# create random input operands
for _ in range(random.randint(min_ops, max_ops)):
n_dim = random.randint(min_dims, max_dims)
labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
labels = possible_labels[labels_idx]
valid_labels.update(labels.tolist())
shape = labels_size[labels]
# turn some dimensions to size 1 for testing broadcasting
mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
broadcast_labels = torch.unique(labels[mask == 1])
shape[(labels[..., None] == broadcast_labels).any(-1)] = 1
labels = labels.tolist()
shape = shape.tolist()
# include ellipsis if not all dimensions were assigned a label already
if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
ell_num_dim = random.randint(1, max_dims - n_dim)
ell_size = max(ell_size, ell_num_dim)
ell_shape = ellipsis_shape[-ell_num_dim:]
# again, turn some dimensions to size 1 for broadcasting
mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
ell_shape[mask == 1] = 1
ell_index = random.randint(0, n_dim)
shape[ell_index:ell_index] = ell_shape
labels.insert(ell_index, ...)
operands.append(make_tensor(shape, dtype=dtype, device=device))
sublists.append(labels)
# NumPy has a bug with the sublist format so for now we compare PyTorch sublist
# implementation against the equation format implementation of NumPy
# see https://github.com/numpy/numpy/issues/10926
np_operands = [op.cpu().numpy() for op in operands]
# test equation format
equation = ','.join(convert_sublist(l) for l in sublists)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format
args = list(itertools.chain.from_iterable(zip(operands, sublists)))
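                # interleave operands with their sublists, i.e.
                # [op0, sublist0, op1, sublist1, ...], which is the sublist
                # calling convention of torch.einsum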
self._check_einsum(*args, np_args=(equation, *np_operands))
# generate an explicit output
out_sublist = []
num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
if num_out_labels > 0:
out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
out_sublist.insert(random.randint(0, num_out_labels), ...)
# test equation format with explicit output
equation += '->' + convert_sublist(out_sublist)
self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
# test sublist format with explicit output
args.append(out_sublist)
self._check_einsum(*args, np_args=(equation, *np_operands))
test(500)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_linalg.py
|
gen_mat
|
def gen_mat(w, h, use_transpose: bool = False):
if not use_transpose:
return torch.rand(w, h, dtype=dtype, device=device)
return torch.rand(h, w, dtype=dtype, device=device).t()
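        # gen_mat returns either a contiguous (w, h) matrix or a transposed,
        # non-contiguous view of the same shape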
# Regression tests for https://github.com/pytorch/pytorch/issues/136299
# Should only expose problems on aarch64, but let's be thorough
        m, n, k = 1, 8, 32
A = gen_mat(m, k, transpose_a)
B = gen_mat(k, n, transpose_b)
C = torch.ones(m, n, dtype=dtype, device=device)
rc = torch.addmm(C, A, B, alpha=alpha, beta=beta)
ref = alpha * A @ B + beta * C
self.assertEqual(rc, ref)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_linalg.py
|
test_1_sized_with_0_strided
|
instantiate_device_type_tests(TestLinalg, globals())
if __name__ == '__main__':
run_tests()
|
def test_1_sized_with_0_strided(self, device, dtype):
a = make_tensor((8, 1, 64), dtype=dtype, device=device)
a_strided = torch.as_strided(a, size=[8, 1, 64], stride=[64, 0, 1])
b = make_tensor((8, 64, 512), dtype=dtype, device=device)
b_strided = torch.as_strided(b, size=[8, 64, 512], stride=[64, 1, 512])
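        # as_strided keeps the original storage and only overrides the stride
        # metadata, so bmm sees a 0-strided singleton dim in a and
        # non-contiguous strides in b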
res = torch.bmm(a_strided, b_strided)
expect = torch.from_numpy(
a_strided.cpu().numpy() @ b_strided.cpu().numpy()).to(device=device, dtype=dtype)
self.assertEqual(expect, res)
|
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import re
import random
from random import randrange
from itertools import product
from functools import reduce, partial
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo,
setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally, serialTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm,
dtypesIfMPS, largeTensorTest)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90OrLater, tf32_on_and_off, _get_magma_version, \
_get_torch_cuda_version, CDNA2OrLater
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.distributions.binomial import Binomial
import torch.backends.opt_einsum as opt_einsum
import operator
import scipy
class TestLinalg(TestCase):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
import scipy.linalg
from torch.testing._internal.common_utils import random_well_conditioned_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_hermitian_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_symmetric_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from numpy.linalg import solve
from scipy.linalg import solve_triangular as tri_solve
import os
import os
import os
from torch.testing._internal.common_utils import CudaMemoryLeakCheck
import os
import os
import os
import os
import os
import os
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
import time
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg
import scipy.sparse
import os
from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
random_hermitian_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
from scipy.linalg import ldl as scipy_ldl
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_logging.py
|
testApiUsage
|
def testApiUsage(self):
"""
This test verifies that api usage logging is not triggered via static
initialization. Since it's triggered at first invocation only - we just
subprocess
"""
s = TestCase.runWithPytorchAPIUsageStderr("import torch")
self.assertRegex(s, "PYTORCH_API_USAGE.*import")
# import the shared library directly - it triggers static init but doesn't call anything
s = TestCase.runWithPytorchAPIUsageStderr("from ctypes import CDLL; CDLL('{}')".format(torch._C.__file__))
self.assertNotRegex(s, "PYTORCH_API_USAGE")
|
def testApiUsage(self):
"""
This test verifies that api usage logging is not triggered via static
initialization. Since it's triggered at first invocation only - we just
subprocess
"""
s = TestCase.runWithPytorchAPIUsageStderr("import torch")
self.assertRegex(s, "PYTORCH_API_USAGE.*import")
# import the shared library directly - it triggers static init but doesn't call anything
s = TestCase.runWithPytorchAPIUsageStderr(
f"from ctypes import CDLL; CDLL('{torch._C.__file__}')"
)
self.assertNotRegex(s, "PYTORCH_API_USAGE")
|
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
class LoggingTest(TestCase):
|
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
class LoggingTest(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_maskedtensor.py
|
_make_tensor_mask
|
def _make_tensor_mask(shape, device):
return make_tensor(
shape, device=device, dtype=torch.bool, low=0, high=1, requires_grad=False
)
|
import torch
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
make_tensor,
parametrize,
instantiate_parametrized_tests,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
SampleInput,
binary_ufuncs,
reduction_ops,
unary_ufuncs,
)
from torch.masked import as_masked_tensor, masked_tensor, _combine_input_and_mask
from torch.masked.maskedtensor.core import _masks_match, _tensors_match
from torch.masked.maskedtensor.unary import NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS, UNARY_NAMES
from torch.masked.maskedtensor.binary import NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS, BINARY_NAMES
from torch.masked.maskedtensor.reductions import REDUCE_NAMES
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_meta.py
|
assert_ref_meta_equal
|
def assert_ref_meta_equal(test_case, func, meta_rs, rs, msg_callable):
flat_meta_rs, _ = tree_flatten(meta_rs)
flat_rs, _ = tree_flatten(rs)
test_case.assertEqual(len(flat_meta_rs), len(flat_rs))
for i, meta_r, r in zip(range(len(flat_rs)), flat_meta_rs, flat_rs):
def test_assert(cond, msg):
if not cond:
raise RuntimeError(f"output {i}: {msg_callable(msg)}")
if not isinstance(r, torch.Tensor):
continue
test_assert(isinstance(meta_r, torch.Tensor), f"but real {i}th result is Tensor")
test_assert(meta_r.dtype == r.dtype, f"but real dtype was {r.dtype}")
test_assert(meta_r.shape == r.shape, f"but real shape was {r.shape}")
# See https://github.com/pytorch/pytorch/issues/78050
if should_check_strides(func) == CheckStrides.ALL:
same_strides, _ = torch._prims_common.check_all_strides(meta_r, r)
test_assert(same_strides, f"but real stride was {r.stride()}")
elif should_check_strides(func) == CheckStrides.SIGNIFICANT:
same_strides, _ = torch._prims_common.check_significant_strides(meta_r, r)
test_assert(same_strides, f"but real stride was {r.stride()}")
test_assert(
meta_r.storage_offset() == r.storage_offset(),
f"but real storage_offset was {r.storage_offset()}")
test_assert(meta_r.requires_grad == r.requires_grad, f"but real requires_grad was {r.requires_grad}")
test_assert(meta_r.is_conj() == r.is_conj(), f"but real is_conj was {r.is_conj()}")
test_assert(meta_r.is_neg() == r.is_neg(), f"but real is_neg was {r.is_neg()}")
# This environment variable controls whether or not we print expected failure
# lists at the end of a test suite run. The intended usage looks like this:
#
# 1. Run `PYTORCH_COLLECT_EXPECT=1 python test/test_meta.py` on a CUDA build
# of PyTorch that has LAPACK/MAGMA installed. You can filter `-k test_meta`
# or `-k test_dispatch_meta` to only focus on one or another list
# 2. Given the printed skip/xfail list, add them to the corresponding lists;
# torch.* entries go in meta_function and aten.* entries go in meta_dispatch.
# If there are preexisting entries, you need to merge in the entries.
#
# This is somewhat manual but typically you shouldn't need to do this, unless
# you've made a major change (e.g., added a new dtype to PyTorch) and need to
# refresh the lists. If you want to do it from scratch, just clear out the
# preexisting lists before running.
#
# WARNING: Python dict literals will silently ignore duplicate keys
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
|
def assert_ref_meta_equal(test_case, func, meta_rs, rs, msg_callable):
flat_meta_rs = pytree.tree_leaves(meta_rs)
flat_rs = pytree.tree_leaves(rs)
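    # tree_leaves flattens arbitrarily nested outputs so meta and real
    # results can be compared leaf by leaf below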
test_case.assertEqual(len(flat_meta_rs), len(flat_rs))
for i, meta_r, r in zip(range(len(flat_rs)), flat_meta_rs, flat_rs):
def test_assert(cond, msg):
if not cond:
raise RuntimeError(f"output {i}: {msg_callable(msg)}")
if not isinstance(r, torch.Tensor):
continue
test_assert(isinstance(meta_r, torch.Tensor), f"but real {i}th result is Tensor")
test_assert(meta_r.dtype == r.dtype, f"for element {i}, was {meta_r.dtype} but real dtype was {r.dtype}")
test_assert(meta_r.shape == r.shape, f"for element {i}, was {meta_r.shape} but real shape was {r.shape}")
# See https://github.com/pytorch/pytorch/issues/78050
if should_check_strides(func) == CheckStrides.ALL:
same_strides, _ = torch._prims_common.check_all_strides(meta_r, r)
test_assert(same_strides, f"for element {i}, was {meta_r.stride()} but real stride was {r.stride()}")
elif should_check_strides(func) == CheckStrides.SIGNIFICANT:
same_strides, _ = torch._prims_common.check_significant_strides(meta_r, r)
test_assert(same_strides, f"for element {i}, was {meta_r.stride()} but real stride was {r.stride()}")
test_assert(
meta_r.storage_offset() == r.storage_offset(),
f"for element {i}, was {meta_r.storage_offset()} but real storage_offset was {r.storage_offset()}")
test_assert(meta_r.requires_grad == r.requires_grad,
f"for element {i}, was {meta_r.requires_grad} but real requires_grad was {r.requires_grad}")
if func not in CHECK_CONJ_SKIPS:
test_assert(meta_r.is_conj() == r.is_conj(),
f"for element {i}, was {meta_r.is_conj()} but real is_conj was {r.is_conj()}")
test_assert(meta_r.is_neg() == r.is_neg(), f"for element {i}, was {meta_r.is_neg()} but real is_neg was {r.is_neg()}")
# This environment variable controls whether or not we print expected failure
# lists at the end of a test suite run. The intended usage looks like this:
#
# 1. Run `PYTORCH_COLLECT_EXPECT=1 python test/test_meta.py` on a CUDA build
# of PyTorch that has LAPACK/MAGMA installed. You can filter `-k test_meta`
# or `-k test_dispatch_meta` to only focus on one or another list
# 2. Given the printed skip/xfail list, add them to the corresponding lists;
# torch.* entries go in meta_function and aten.* entries go in meta_dispatch.
# If there are preexisting entries, you need to merge in the entries.
#
# This is somewhat manual but typically you shouldn't need to do this, unless
# you've made a major change (e.g., added a new dtype to PyTorch) and need to
# refresh the lists. If you want to do it from scratch, just clear out the
# preexisting lists before running.
#
# WARNING: Python dict literals will silently ignore duplicate keys
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fail if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fail if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
verbose_print
|
def verbose_print(e):
class Lit:
def __init__(self, s):
self.s = s
def __repr__(self):
return self.s
def go(t):
if isinstance(t, torch.Tensor):
return Lit(f"{t} stride={t.stride()}")
else:
return t
return repr(tree_map(go, e))
|
def verbose_print(e):
class Lit:
def __init__(self, s):
self.s = s
def __repr__(self):
return self.s
def go(t):
if is_sparse_any(t):
return t
elif isinstance(t, torch.Tensor):
return Lit(f"{t} stride={t.stride()}")
else:
return t
return repr(tree_map(go, e))
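# Illustrative usage (added note, not in the original file): verbose_print maps
# every dense tensor in an arbitrarily nested input to a literal that also shows
# its strides, e.g. repr-ing {"x": torch.ones(2, 3)} yields roughly
# "{'x': tensor([[1., 1., 1.], [1., 1., 1.]]) stride=(3, 1)}"; sparse tensors
# (is_sparse_any) are passed through unchanged, since they have no strides.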
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fail if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fail if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
go
|
def go(t):
if isinstance(t, torch.Tensor):
return Lit(f"{t} stride={t.stride()}")
else:
return t
return repr(tree_map(go, e))
|
def go(t):
if is_sparse_any(t):
return t
elif isinstance(t, torch.Tensor):
return Lit(f"{t} stride={t.stride()}")
else:
return t
return repr(tree_map(go, e))
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fail if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fail if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
get_strided_args
|
def get_strided_args(args):
def get_strided_variants(t, include_storage_offset=False):
variants = []
# contiguous
variants.append(t)
# transposed
if t.ndim > 1:
perm = list(reversed(range(t.ndim)))
transposed = torch.empty(
t.shape[::-1], device=t.device, dtype=t.dtype, requires_grad=t.requires_grad
).permute(perm).copy_(t)
variants.append(transposed)
# nondense
if t.ndim > 0:
nondense = torch.repeat_interleave(t, 2, dim=-1)[..., ::2]
variants.append(nondense)
# channel_last
if t.ndim == 4:
variants.append(t.contiguous(memory_format=torch.channels_last))
# channel_last_3d
if t.ndim == 5:
variants.append(t.contiguous(memory_format=torch.channels_last_3d))
# storage_offset
if include_storage_offset:
buffer = torch.empty(t.numel() + 1, device=t.device, dtype=t.dtype, requires_grad=t.requires_grad)
buffer = buffer.as_strided(t.shape, t.stride(), storage_offset=1)
buffer.copy_(t)
variants.append(buffer)
return variants
strided_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and not arg.is_sparse_csr and arg.is_contiguous():
strided_arg_variants = get_strided_variants(arg)
else:
strided_arg_variants = [arg]
strided_args.append(strided_arg_variants)
yield from itertools.product(*strided_args)
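# Example (illustrative, not part of the original test file): for
#   sample_args = [torch.rand(2, 2), 1.0]
# get_strided_args yields one tuple per stride variant of the tensor argument,
# paired with the unchanged scalar: (contiguous, 1.0), (transposed, 1.0) and
# (nondense, 1.0); non-contiguous or sparse-CSR tensors are passed through as-is.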
class MetaCrossRefDispatchMode(torch.utils._python_dispatch.TorchDispatchMode):
test_case: TestCase
device: torch.device
dtype: torch.dtype
def __init__(self, test_case, *, device, dtype, symbolic_meta: bool):
self.test_case = test_case
# save TLS
self.precision = test_case.precision
self.rel_tol = test_case.rel_tol
self.device_type = torch.device(device).type
self.dtype = dtype
self.symbolic_meta = symbolic_meta
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
self.test_case.precision = self.precision
self.test_case.rel_tol = self.rel_tol
if self.dtype in meta_dispatch_skips.get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_dispatch_device_skips[self.device_type].get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_dispatch_expected_failures.get(func, set()):
test_expect = TestExpect.XFAILURE
elif self.dtype in meta_dispatch_device_expected_failures[self.device_type].get(func, set()):
test_expect = TestExpect.XFAILURE
else:
test_expect = TestExpect.SUCCESS
return run_meta_crossref(
self.test_case,
test_expect,
func,
args,
kwargs,
dtype=self.dtype,
device_type=self.device_type,
run_symbolic_meta=self.symbolic_meta,
)
# NB: we're running these tests only on CUDA because there are some
# inconsistencies between CUDA and CPU, and running on CUDA makes it easier
# to ignore the CPU case when inconsistencies arise. Ideally we deal
# with the inconsistencies but this takes time.
class TestMeta(TestCase):
# Copies inputs to inplace operations to avoid inplace modifications
# to leaves requiring gradient
def _get_safe_inplace(self, inplace_variant):
@wraps(inplace_variant)
def _fn(t, *args, **kwargs):
return inplace_variant(t.clone(), *args, **kwargs)
return _fn
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_meta_outplace(self, device, dtype, op):
# run the OpInfo sample inputs, cross-referencing them with the
# meta implementation and check the results are the same. All
# the heavy lifting happens in MetaCrossRefFunctionMode
func = op.get_op()
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
with MetaCrossRefFunctionMode(self, dtype=dtype, device=device, inplace=False):
expected = func(*args, **kwargs)
if isinstance(expected, torch.Tensor) and op.supports_out:
func(*args, **kwargs, out=expected)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_meta_inplace(self, device, dtype, op):
func = op.get_inplace()
if not func:
self.skipTest("No inplace variable for this op")
if func in meta_inplace_skips:
self.skipTest("Skipped")
func = self._get_safe_inplace(func)
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
if sample_input.broadcasts_input:
continue
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
with MetaCrossRefFunctionMode(self, dtype=dtype, device=device, inplace=True):
expected = func(*args, **kwargs)
def _run_dispatch_meta_test(self, device, dtype, op, symbolic_meta, inplace, all_stride_variants=False):
if inplace:
func = op.get_inplace()
if not func:
self.skipTest("No inplace variable for this op")
else:
func = op.get_op()
if func in meta_dispatch_early_skips:
self.skipTest("Function is in dispatch early skips")
if inplace:
func = self._get_safe_inplace(func)
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
if inplace and sample_input.broadcasts_input:
continue
sample_args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
if all_stride_variants and sum(isinstance(arg, torch.Tensor) for arg in sample_args) <= 5:
# test inputs <= 5 tensors to avoid combinatorial explosion
strided_args = get_strided_args(sample_args)
else:
strided_args = [sample_args]
for args in strided_args:
with MetaCrossRefDispatchMode.push(self, dtype=dtype, device=device, symbolic_meta=symbolic_meta):
expected = func(*args, **kwargs)
if not inplace and isinstance(expected, torch.Tensor) and op.supports_out:
func(*args, **kwargs, out=expected)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_dispatch_meta_outplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=False, inplace=False)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_dispatch_meta_inplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=False, inplace=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_dispatch_symbolic_meta_outplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=False)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_dispatch_symbolic_meta_inplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
# only test one dtype, as output stride behavior is the same for all dtypes
@ops(op_db, dtypes=OpDTypes.any_common_cpu_cuda_one)
# Only test on CUDA, as CUDA kernel's stride is the reference
@onlyCUDA
def test_dispatch_symbolic_meta_outplace_all_strides(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=False, all_stride_variants=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
# only test one dtype, as output stride behavior is the same for all dtypes
@ops(op_db, dtypes=OpDTypes.any_common_cpu_cuda_one)
# Only test on CUDA, as CUDA kernel's stride is the reference
@onlyCUDA
def test_dispatch_symbolic_meta_inplace_all_strides(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=True, all_stride_variants=True)
def test_empty_quantized(self):
r = torch.empty(2 ** 52, device='meta', dtype=torch.qint8)
self.assertEqual(r.device.type, 'meta')
@onlyCPU
def test_meta_autograd_no_error(self):
lib = torch.library.Library("meta_test", "DEF")
impl_cpu = torch.library.Library("meta_test", "IMPL", "CPU")
impl_meta = torch.library.Library("meta_test", "IMPL", "Meta")
def foo_impl(x):
return x + 1
lib.define("foo(Tensor a) -> Tensor")
impl_meta.impl("foo", foo_impl)
impl_cpu.impl("foo", foo_impl)
a = torch.ones(2, device='meta')
# The point of the test is that this should not error:
# We have a fallthrough kernel registered to the AutogradMeta
# key for custom ops, so it's fine that `foo()` doesn't have
# an autograd kernel.
b = torch.ops.meta_test.foo.default(a)
del impl_meta
del impl_cpu
del lib
def test_huber_loss_backward(self):
inps = [torch.rand(2**52, device='meta') for _ in range(3)]
r = torch.ops.aten.huber_loss_backward(*inps, 0, 1.0)
self.assertEqual(r.device.type, 'meta')
self.assertEqual(r.shape, inps[0].shape)
def test_fill__alias_relationship(self):
inps = torch.rand(2**52, device='meta')
r = torch.ops.aten.fill_(inps, 1.0)
# aten.fill_ returns an alias
self.assertEqual(id(inps), id(r))
# aten.fill returns a new tensor
r2 = torch.ops.aten.fill(inps, 1.0)
self.assertNotEqual(id(inps), id(r2))
def test_meta__fused_moving_avg_obs_fq_helper(self, device):
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
to_meta = MetaConverter()
x = torch.randn(5, 5, device=device)
running_min_op = torch.tensor(float("inf"), device=device)
running_max_op = torch.tensor(float("-inf"), device=device)
avg_const = 0.01
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
mod = FusedMovingAvgObsFakeQuantize()
torch.ao.quantization.enable_fake_quant(mod)
torch.ao.quantization.enable_observer(mod)
mod.to(device)
meta_x = to_meta(x)
args = [
x,
mod.observer_enabled,
mod.fake_quant_enabled,
running_min_op,
running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
]
meta_args = args.copy()
meta_args[0] = meta_x
kwargss = [
{},
{"per_row_fake_quant": False, "symmetric_quant": False},
{"per_row_fake_quant": False, "symmetric_quant": True},
]
for kwargs in kwargss:
ref_out = aten._fused_moving_avg_obs_fq_helper.default(*args, **kwargs)
meta_out = aten._fused_moving_avg_obs_fq_helper.default(*meta_args, **kwargs)
self.assertEqual(ref_out[0].size(), meta_out[0].size())
self.assertEqual(ref_out[0].stride(), meta_out[0].stride())
self.assertEqual(ref_out[1].size(), meta_out[1].size())
self.assertEqual(ref_out[1].stride(), meta_out[1].stride())
def test_cdist_forward(self, device):
to_meta = MetaConverter()
x1 = torch.rand([3, 2], device=device)
x2 = torch.rand([2, 2], device=device)
p = 2.0
for compute_mode in (None, 1, 2):
ref = aten._cdist_forward.default(x1, x2, p, compute_mode)
res = aten._cdist_forward.default(to_meta(x1), to_meta(x2), p, compute_mode)
self.assertEqual(res.device.type, 'meta')
self.assertEqual(ref.shape, res.shape)
# opinfo test is using aten.fill_, it's not testing aten.fill
@onlyCUDA
def test_fill_stride(self):
to_meta = MetaConverter()
sample_args = [torch.rand(2, 2, 2, 2), 1.0]
for args in get_strided_args(sample_args):
meta_args = to_meta(args)
ref_out = torch.ops.aten.fill(*args)
meta_out = torch.ops.aten.fill(*meta_args)
self.assertEqual(ref_out.size(), meta_out.size())
self.assertEqual(ref_out.stride(), meta_out.stride())
def test_map_location_deserialize(self):
import io
t = torch.rand(10)
b = io.BytesIO()
torch.save(t, b)
b.seek(0)
r = torch.load(b, map_location=torch.device("meta"))
self.assertEqual(r.device.type, 'meta')
self.assertEqual(r.shape, t.shape)
self.assertEqual(r.dtype, t.dtype)
self.assertEqual(r.storage().data_ptr(), 0)
instantiate_device_type_tests(TestMeta, globals())
|
def get_strided_args(args):
def get_strided_variants(t, include_storage_offset=False):
variants = []
# contiguous
variants.append(t)
# transposed
if t.ndim > 1:
perm = list(reversed(range(t.ndim)))
transposed = torch.empty(
t.shape[::-1], device=t.device, dtype=t.dtype, requires_grad=t.requires_grad
).permute(perm).copy_(t)
variants.append(transposed)
# nondense
if t.ndim > 0:
nondense = torch.repeat_interleave(t, 2, dim=-1)[..., ::2]
variants.append(nondense)
# channel_last
if t.ndim == 4:
variants.append(t.contiguous(memory_format=torch.channels_last))
# channel_last_3d
if t.ndim == 5:
variants.append(t.contiguous(memory_format=torch.channels_last_3d))
# storage_offset
if include_storage_offset:
buffer = torch.empty(t.numel() + 1, device=t.device, dtype=t.dtype, requires_grad=t.requires_grad)
buffer = buffer.as_strided(t.shape, t.stride(), storage_offset=1)
buffer.copy_(t)
variants.append(buffer)
return variants
strided_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and not arg.is_sparse_csr and arg.is_contiguous():
strided_arg_variants = get_strided_variants(arg)
else:
strided_arg_variants = [arg]
strided_args.append(strided_arg_variants)
yield from itertools.product(*strided_args)
class MetaCrossRefDispatchMode(torch.utils._python_dispatch.TorchDispatchMode):
test_case: TestCase
device: torch.device
dtype: torch.dtype
aten_olp_no_out_overload: set = set()
def __init__(self, test_case, *, device, dtype, symbolic_meta: bool, inplace: bool, supports_out: bool):
self.test_case = test_case
# save TLS
self.precision = test_case.precision
self.rel_tol = test_case.rel_tol
self.device_type = torch.device(device).type
self.dtype = dtype
self.symbolic_meta = symbolic_meta
self.inplace = inplace
self.supports_out = supports_out
@staticmethod
def try_resolve_aten_out_overload(ol, args, kwargs, num_outputs):
ol_args = ol._schema.arguments
olp: OpOverloadPacket = ol._overloadpacket
if olp in MetaCrossRefDispatchMode.aten_olp_no_out_overload:
return (None, None, None)
candidate_ols = []
for candidate_ol_name in olp.overloads():
candidate_ol = getattr(olp, candidate_ol_name)
if any(arg.is_out for arg in candidate_ol._schema.arguments):
candidate_ols.append(candidate_ol)
if not candidate_ols:
MetaCrossRefDispatchMode.aten_olp_no_out_overload.add(olp)
return (None, None, None)
# Now match based on args, kwargs and number of required outputs
candidate_ol: OpOverload = None
for candidate_ol in candidate_ols:
candidate_ol_args = candidate_ol._schema.arguments
if (len(args) >= len(candidate_ol_args)):
continue
# Positional arguments must have the same type
if not all(
ol_args[pos_arg_ind].type == candidate_ol_args[pos_arg_ind].type
for pos_arg_ind in range(len(args))
):
continue
# Number of outputs must match
candidate_out_names = [out_arg.name for out_arg in candidate_ol_args[-num_outputs:] if out_arg.is_out]
if len(candidate_out_names) != num_outputs:
continue
# Now try and match kwargs. Just need to ensure that the
# remaining kwargs allow an out overload to be called. For example
# we can throw away parameters like `dtype` that may be passed to the
# functional version of the op since the `dtype` will already be present
# in the `out` argument
new_kwargs = {}
kwargs_match = True
for arg in candidate_ol_args[len(args):-num_outputs]:
if arg.name not in kwargs:
if arg.has_default_value():
new_kwargs[arg.name] = arg.default_value
elif isinstance(arg.type, torch.OptionalType):
if isinstance(arg.type.getElementType(), torch.BoolType):
new_kwargs[arg.name] = False
else:
new_kwargs[arg.name] = None
else:
kwargs_match = False
break
else:
new_kwargs[arg.name] = kwargs[arg.name]
if kwargs_match:
return candidate_ol, candidate_out_names, new_kwargs
return None, None, None
def _get_expected_test_result(self, func: OpOverload):
if self.dtype in meta_dispatch_skips.get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_dispatch_device_skips[self.device_type].get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_dispatch_expected_failures.get(func, set()):
test_expect = TestExpect.XFAILURE
elif self.dtype in meta_dispatch_device_expected_failures[self.device_type].get(func, set()):
test_expect = TestExpect.XFAILURE
else:
test_expect = TestExpect.SUCCESS
return test_expect
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
self.test_case.precision = self.precision
self.test_case.rel_tol = self.rel_tol
test_expect = self._get_expected_test_result(func)
expected = run_meta_crossref(
self.test_case,
test_expect,
func,
args,
kwargs,
dtype=self.dtype,
device_type=self.device_type,
run_symbolic_meta=self.symbolic_meta,
)
# This is to test torch ops that do not have an out parameter but have
# aten op overloads that have out parameters. Additionally, Python decompositions
# may register OpOverloadPacket's so decompositions need to be tested
# to ensure all OpOverloads still function for the Meta key (e.g. if a python decomposition
# is registered for an aten op aten.foo with overloads [default, out], the python
# function needs to support receiving `out` arguments)
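# Illustrative example (added note): if the functional call above produced a
# single tensor for, say, aten.foo.default and an aten.foo.out overload can be
# resolved from the same args/kwargs, the block below re-runs the meta
# cross-reference with that result passed back in through the out= parameter(s).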
if (
not self.inplace and
not self.supports_out and
test_expect == TestExpect.SUCCESS and
(torch.is_tensor(expected) or isinstance(expected, Iterable))
):
# check to see if there is a potential out overload
num_outputs = 1 if torch.is_tensor(expected) else len(expected)
func_out_overload, out_param_names, kwargs = self.try_resolve_aten_out_overload(func, args, kwargs, num_outputs)
if func_out_overload:
if num_outputs == 1:
kwargs[out_param_names[0]] = expected
else:
for ind, out_param_name in enumerate(out_param_names):
kwargs[out_param_name] = expected[ind]
test_expect = self._get_expected_test_result(func_out_overload)
run_meta_crossref(
self.test_case,
test_expect,
func_out_overload,
args,
kwargs,
dtype=self.dtype,
device_type=self.device_type,
run_symbolic_meta=self.symbolic_meta,
)
return expected
# NB: we're running these tests only on CUDA because there are some
# inconsistencies between CUDA and CPU, and running on CUDA makes it easier
# to ignore the CPU case when inconsistencies arise. Ideally we deal
# with the inconsistencies but this takes time.
@unMarkDynamoStrictTest
class TestMeta(TestCase):
# Copies inputs to inplace operations to avoid inplace modifications
# to leaves requiring gradient
def _get_safe_inplace(self, inplace_variant):
@wraps(inplace_variant)
def _fn(t, *args, **kwargs):
if isinstance(t, list):
return inplace_variant([x.clone() for x in t], *args, **kwargs)
else:
return inplace_variant(t.clone(), *args, **kwargs)
return _fn
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(itertools.chain(op_db, foreach_op_db))
def test_meta_outplace(self, device, dtype, op):
if "_scaled_mm" in op.name:
raise unittest.SkipTest("_scaled_mm does not support meta device")
skip_op_names = (
"fft.ihfft",
"fft.ihfft2",
"linalg.lu_solve",
)
if TEST_WITH_TORCHDYNAMO and op.name in skip_op_names:
raise unittest.SkipTest("flaky")
# run the OpInfo sample inputs, cross-referencing them with the
# meta implementation and check the results are the same. All
# the heavy lifting happens in MetaCrossRefFunctionMode
func = op.get_op()
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
with MetaCrossRefFunctionMode(self, dtype=dtype, device=device, inplace=False):
expected = func(*args, **kwargs)
if isinstance(expected, torch.Tensor) and op.supports_out:
func(*args, **kwargs, out=expected)
# Special test for functions taking "device" kwarg
# The crossref tests that replacing the device with "meta" works
# This part makes sure that *_like functions work well with a "meta"
# Tensor and their original device argument.
if "device" in kwargs and "_like" in op.name:
with torch.random.fork_rng():
torch.manual_seed(123)
ref = func(*args, **kwargs)
# *_like functions take a Tensor as first argument
assert isinstance(args[0], torch.Tensor)
with torch.random.fork_rng():
torch.manual_seed(123)
args[0] = args[0].to(device="meta")
meta = func(*args, **kwargs)
# empty_like is not deterministic
if op.name != "empty_like":
self.assertEqual(ref, meta)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(itertools.chain(op_db, foreach_op_db))
def test_meta_inplace(self, device, dtype, op):
func = op.get_inplace()
if not func:
self.skipTest("No inplace variable for this op")
if op.promotes_int_to_float and not dtype.is_floating_point:
self.skipTest("Op promotes to float, which is impossible for inplace with non-float input")
if func in meta_inplace_skips:
self.skipTest("Skipped")
func = self._get_safe_inplace(func)
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
if sample_input.broadcasts_input:
continue
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
with MetaCrossRefFunctionMode(self, dtype=dtype, device=device, inplace=True):
expected = func(*args, **kwargs)
def _run_dispatch_meta_test(self, device, dtype, op, symbolic_meta, inplace, all_stride_variants=False):
if "_scaled_mm" in op.name:
raise unittest.SkipTest("_scaled_mm does not support meta device")
if inplace:
func = op.get_inplace()
if not func:
self.skipTest("No inplace variable for this op")
if op.promotes_int_to_float and not dtype.is_floating_point:
self.skipTest("Op promotes to float, which is impossible for inplace with non-float input")
else:
func = op.get_op()
if func in meta_dispatch_early_skips:
self.skipTest("Function is in dispatch early skips")
if inplace:
func = self._get_safe_inplace(func)
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
if inplace and sample_input.broadcasts_input:
continue
sample_args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
if all_stride_variants and sum(isinstance(arg, torch.Tensor) for arg in sample_args) <= 5:
# test inputs <= 5 tensors to avoid combinatorial explosion
strided_args = get_strided_args(sample_args)
else:
strided_args = [sample_args]
for args in strided_args:
with MetaCrossRefDispatchMode.push(
self, dtype=dtype, device=device,
symbolic_meta=symbolic_meta, inplace=inplace,
supports_out=op.supports_out):
expected = func(*args, **kwargs)
if not inplace and isinstance(expected, torch.Tensor) and op.supports_out:
func(*args, **kwargs, out=expected)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(itertools.chain(op_db, foreach_op_db))
def test_dispatch_meta_outplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=False, inplace=False)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(itertools.chain(op_db, foreach_op_db))
def test_dispatch_meta_inplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=False, inplace=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(itertools.chain(op_db, foreach_op_db))
def test_dispatch_symbolic_meta_outplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=False)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(itertools.chain(op_db, foreach_op_db))
def test_dispatch_symbolic_meta_inplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
# only test one dtype, as output stride behavior is the same for all dtypes
@ops(itertools.chain(op_db, foreach_op_db), dtypes=OpDTypes.any_common_cpu_cuda_one)
# Only test on CUDA, as CUDA kernel's stride is the reference
@onlyCUDA
def test_dispatch_symbolic_meta_outplace_all_strides(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=False, all_stride_variants=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
# only test one dtype, as output stride behavior is the same for all dtypes
@ops(itertools.chain(op_db, foreach_op_db), dtypes=OpDTypes.any_common_cpu_cuda_one)
# Only test on CUDA, as CUDA kernel's stride is the reference
@onlyCUDA
def test_dispatch_symbolic_meta_inplace_all_strides(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=True, all_stride_variants=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
# only test one dtype, as output stride behavior is the same for all dtypes
@ops(binary_ufuncs, allowed_dtypes=(torch.float32,))
# Only test on CUDA, as CUDA kernel's stride is the reference
@onlyCUDA
def test_binary_ufuncs_mixed_dtype(self, device, dtype, op):
make_arg = partial(
make_tensor,
device=device,
)
def sample_input(op, device, dtype, requires_grad, **kwargs):
yield SampleInput(
make_arg((S,), dtype=dtype), make_arg((S,), dtype=torch.float16)
)
op = copy.copy(op)
op.sample_inputs_func = sample_input
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=False)
def test_empty_quantized(self):
r = torch.empty(2 ** 52, device='meta', dtype=torch.qint8)
self.assertEqual(r.device.type, 'meta')
def test_nan_to_num(self):
t = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14], device='meta')
r = t.nan_to_num()
self.assertEqual(r.device.type, 'meta')
def test_inplace_masked_fill_error(self):
t = torch.randn(3, 3, device='meta')
with self.assertRaisesRegex(RuntimeError, "doesn't match the broadcast"):
t.masked_fill_((t > 0).unsqueeze(0), 0.1)
def test_inplace_bin_ops_error(self):
t = torch.randn(3, 3, device='meta')
for op in (torch.Tensor.add_, torch.Tensor.sub_, torch.Tensor.mul_, torch.Tensor.div_,
torch.Tensor.logical_and_, torch.Tensor.logical_or_, torch.Tensor.logical_xor_):
with self.assertRaisesRegex(RuntimeError, "doesn't match the broadcast"):
op(t, t.clone().unsqueeze(0))
@onlyCPU
def test_meta_autograd_no_error(self):
with torch.library._scoped_library("meta_test", "DEF") as lib:
with torch.library._scoped_library("meta_test", "IMPL", "CPU") as impl_cpu:
with torch.library._scoped_library("meta_test", "IMPL", "Meta") as impl_meta:
def foo_impl(x):
return x + 1
lib.define("foo(Tensor a) -> Tensor")
impl_meta.impl("foo", foo_impl)
impl_cpu.impl("foo", foo_impl)
a = torch.ones(2, device='meta')
# The point of the test is that this should not error:
# We have a fallthrough kernel registered to the AutogradMeta
# key for custom ops, so it's fine that `foo()` doesn't have
# an autograd kernel.
b = torch.ops.meta_test.foo.default(a)
def test_huber_loss_backward(self):
inps = [torch.rand(2**52, device='meta') for _ in range(3)]
r = torch.ops.aten.huber_loss_backward(*inps, 0, 1.0)
self.assertEqual(r.device.type, 'meta')
self.assertEqual(r.shape, inps[0].shape)
def _norm_backwards_test_helper(self, op, args, output_mask, expected_shapes):
dtype = torch.float32
device = "meta"
# test functional call
grads = op(*args, output_mask)
def assertEqualShapes(res, exp):
self.assertIsNone(res) if exp is None else self.assertEqual(exp, res.shape)
assertEqualShapes(grads[0], expected_shapes[0])
assertEqualShapes(grads[1], expected_shapes[1])
assertEqualShapes(grads[2], expected_shapes[2])
out_kwargs = {
f"out{i}": torch.empty(0, device=device, dtype=dtype)
for i in range(len(output_mask))
}
# test call with out parameters
grads = op(*args, output_mask, **out_kwargs)
def assertEqualShapes(res, exp):
self.assertEqual(exp, res.shape) if exp is not None else True
assertEqualShapes(out_kwargs["out0"], expected_shapes[0])
assertEqualShapes(out_kwargs["out1"], expected_shapes[1])
assertEqualShapes(out_kwargs["out2"], expected_shapes[2])
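# Added note (illustrative): out0/out1/out2 follow the naming convention of the
# out= overloads for multi-output ops and are assumed here to line up with
# grad_input, grad_weight and grad_bias of the *_backward ops exercised below.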
@onlyCPU
@parametrize("output_mask", list(itertools.product([True, False], [True, False], [True, False])))
def test_layer_norm_backward(self, output_mask):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
device = "meta"
dtype = torch.float32
samples = sample_inputs_layer_norm(None, device, dtype, requires_grad=False)
for sample in samples:
with self.subTest(sample=sample):
# handle optional weight and bias
if len(sample.args) != 3:
sample.args = (*sample.args, *([None] * (3 - len(sample.args))))
grad_out = torch.ones_like(sample.input)
normalized_shape, weight, bias = sample.args
ndims_after_reduction = sample.input.ndim - len(normalized_shape)
mean_shape = grad_out.shape[:ndims_after_reduction]
mean = torch.zeros(mean_shape, device=device, dtype=dtype)
rstd = torch.zeros(mean_shape, device=device, dtype=dtype)
expected_shapes = (
sample.input.shape if output_mask[0] else None,
weight.shape if output_mask[1] and weight is not None else None,
bias.shape if output_mask[2] and bias is not None else None)
args = [grad_out, sample.input, normalized_shape, mean, rstd, weight, bias]
self._norm_backwards_test_helper(torch.ops.aten.native_layer_norm_backward,
args, output_mask, expected_shapes)
@onlyCPU
@parametrize("output_mask", list(itertools.product([True, False], [True, False], [True, False])))
def test_group_norm_backward(self, output_mask):
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
# input, (args) num_groups, (kwargs) weight, bias eps
device = "meta"
dtype = torch.float32
samples = sample_inputs_group_norm(None, device, dtype, requires_grad=False)
for sample in samples:
with self.subTest(sample=sample):
grad_out = torch.ones_like(sample.input)
N, C = sample.input.shape[:2]
HxW = torch.prod(torch.as_tensor(sample.input.shape[2:]), dtype=torch.int32).item()
group = sample.args[0]
mean = torch.zeros((N, group), device=device, dtype=dtype)
rstd = torch.zeros((N, group), device=device, dtype=dtype)
weight = torch.zeros((C), device=device, dtype=dtype)
args = [grad_out, sample.input, mean, rstd, weight, N, C, HxW, group]
expected_shapes = (
sample.input.shape if output_mask[0] else None,
weight.shape if output_mask[1] else None,
weight.shape if output_mask[2] else None)
# test functional call
self._norm_backwards_test_helper(torch.ops.aten.native_group_norm_backward,
args, output_mask, expected_shapes)
@onlyCPU
@parametrize("output_mask", list(itertools.product([True], [True, False], [True, False])))
def test_batch_norm_backward(self, output_mask):
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
# input, (args) num_groups, (kwargs) weight, bias eps
device = "meta"
dtype = torch.float32
samples = sample_inputs_batch_norm(None, device, dtype, requires_grad=False)
for sample in samples:
with self.subTest(sample=sample):
if sample.input.dim() < 2:
continue
grad_out = torch.ones_like(sample.input)
running_mean, running_var, weight, bias = sample.args
train = sample.kwargs.get("training", True)
save_mean = torch.zeros((sample.input.shape[1], ), device=device, dtype=dtype) if train else None
save_invstd = torch.zeros((sample.input.shape[1], ), device=device, dtype=dtype) if train else None
args = [grad_out, sample.input, weight, running_mean, running_var,
save_mean, save_invstd, train, sample.kwargs.get("eps", 1e-5)]
expected_shapes = (
sample.input.shape,
torch.Size([sample.input.shape[1]]) if output_mask[1] else None,
torch.Size([sample.input.shape[1]]) if output_mask[2] else None)
self._norm_backwards_test_helper(torch.ops.aten.native_batch_norm_backward,
args, output_mask, expected_shapes)
def test_fill__alias_relationship(self):
inps = torch.rand(2**52, device='meta')
r = torch.ops.aten.fill_(inps, 1.0)
# aten.fill_ returns an alias
self.assertEqual(id(inps), id(r))
# aten.fill returns a new tensor
r2 = torch.ops.aten.fill(inps, 1.0)
self.assertNotEqual(id(inps), id(r2))
def test_meta__fused_moving_avg_obs_fq_helper(self, device):
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
to_meta = MetaConverter()
x = torch.randn(5, 5, device=device)
running_min_op = torch.tensor(float("inf"), device=device)
running_max_op = torch.tensor(float("-inf"), device=device)
avg_const = 0.01
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
mod = FusedMovingAvgObsFakeQuantize()
torch.ao.quantization.enable_fake_quant(mod)
torch.ao.quantization.enable_observer(mod)
mod.to(device)
meta_x = to_meta(x)
args = [
x,
mod.observer_enabled,
mod.fake_quant_enabled,
running_min_op,
running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
]
meta_args = args.copy()
meta_args[0] = meta_x
kwargss = [
{},
{"per_row_fake_quant": False, "symmetric_quant": False},
{"per_row_fake_quant": False, "symmetric_quant": True},
]
for kwargs in kwargss:
ref_out = aten._fused_moving_avg_obs_fq_helper.default(*args, **kwargs)
meta_out = aten._fused_moving_avg_obs_fq_helper.default(*meta_args, **kwargs)
self.assertEqual(ref_out[0].size(), meta_out[0].size())
self.assertEqual(ref_out[0].stride(), meta_out[0].stride())
self.assertEqual(ref_out[1].size(), meta_out[1].size())
self.assertEqual(ref_out[1].stride(), meta_out[1].stride())
def test_cdist_forward(self, device):
to_meta = MetaConverter()
x1 = torch.rand([3, 2], device=device)
x2 = torch.rand([2, 2], device=device)
p = 2.0
for compute_mode in (None, 1, 2):
ref = aten._cdist_forward.default(x1, x2, p, compute_mode)
res = aten._cdist_forward.default(to_meta(x1), to_meta(x2), p, compute_mode)
self.assertEqual(res.device.type, 'meta')
self.assertEqual(ref.shape, res.shape)
def test_quantized_embedding_bag(self):
tab_shape = [8, 128]
emb_size, ind_len, off_len = tab_shape[0], 32, 33
f_table = torch.from_numpy((np.random.random_sample(tab_shape) + 1).astype(np.float32))
q_table = torch.ops.quantized.embedding_bag_byte_prepack(f_table)
indices = torch.from_numpy(np.random.randint(low=0, high=emb_size, size=ind_len)).int()
max_length = len(indices) // (off_len - 1)
if max_length > 20:
max_length = 20
np_lengths = np.random.randint(0, max_length + 1, size=off_len - 1).astype(np.int32)
offsets = torch.cat([torch.zeros([1]), torch.cumsum(torch.from_numpy(np_lengths), 0)]).int()
eb = torch.ops.quantized.embedding_bag_byte_rowwise_offsets(
q_table.to(device="meta"),
indices.to(device="meta"),
offsets.to(device="meta"),
mode=0, # sum
per_sample_weights=None,
include_last_offset=True,
)
self.assertEqual(eb.shape, [32, 128])
self.assertEqual(eb.dtype, torch.float32)
self.assertEqual(eb.untyped_storage().data_ptr(), 0)
# Tests mean and max.
# Can't easily test sum, because there is a fast path for sum which
# causes offset2bag to not get allocated... but the backward function
# needs it, and the offset2bag computation lives inside the
# derivatives.yaml formula directly, so there is no way to access it.
# To test sum, need to manually compute offset2bag
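# A minimal sketch of that manual computation (an added illustration, assuming
# include_last_offset=False; not code from the original test):
#   bag_sizes = torch.diff(offsets, append=torch.tensor([indices.numel()]))
#   offset2bag = torch.repeat_interleave(torch.arange(offsets.numel()), bag_sizes)
# e.g. offsets = [0, 2, 3, 5] with 5 indices gives offset2bag = [0, 0, 1, 2, 2].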
@parametrize("mode", [1, 2])
def test_embedding_bag_dense_backward(self, mode):
weight = torch.randn(4, 3, requires_grad=True)
indices = torch.tensor([1, 0, 2, 1, 3])
offsets = torch.tensor([0, 2, 3, 5])
scale_grad_by_freq = False
sparse = False
per_sample_weights = None
include_last_offset = False
padding_idx = -1
output, offset2bag, bag_size, maximum_indices = torch.ops.aten._embedding_bag.default(
weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx
)
grad = torch.randn_like(output)
# Call the function with example inputs
grad_weight = torch.ops.aten._embedding_bag_dense_backward.default(
grad, indices, offset2bag, bag_size, maximum_indices, weight.size(0),
scale_grad_by_freq, mode, per_sample_weights, padding_idx
)
meta_grad_weight = torch.ops.aten._embedding_bag_dense_backward.default(
grad.to('meta'), indices.to('meta'), offset2bag.to('meta'), bag_size.to('meta'),
maximum_indices.to('meta'), weight.size(0),
scale_grad_by_freq, mode, per_sample_weights, padding_idx
)
self.assertEqual(grad_weight.to('meta'), meta_grad_weight)
def test_embedding_bag_dense_backward_per_sample_weights(self):
weight = torch.randn(4, 3, requires_grad=True)
indices = torch.tensor([1, 0, 2, 1, 3])
offsets = torch.tensor([0, 2, 3, 5])
scale_grad_by_freq = False
sparse = False
mode = 0
per_sample_weights = torch.randn(5, requires_grad=True)
include_last_offset = False
padding_idx = -1
output, offset2bag, bag_size, maximum_indices = torch.ops.aten._embedding_bag.default(
weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx
)
grad = torch.randn_like(output)
# Call the function with example inputs
grad_weight = torch.ops.aten._embedding_bag_per_sample_weights_backward.default(
grad, weight, indices, offsets, offset2bag, mode, padding_idx
)
meta_grad_weight = torch.ops.aten._embedding_bag_per_sample_weights_backward.default(
grad.to('meta'), weight.to('meta'), indices.to('meta'),
offsets.to('meta'), offset2bag.to('meta'), mode, padding_idx
)
self.assertEqual(grad_weight.to('meta'), meta_grad_weight)
# opinfo test is using aten.fill_, it's not testing aten.fill
@onlyCUDA
def test_fill_stride(self):
to_meta = MetaConverter()
sample_args = [torch.rand(2, 2, 2, 2), 1.0]
for args in get_strided_args(sample_args):
meta_args = to_meta(args)
ref_out = torch.ops.aten.fill(*args)
meta_out = torch.ops.aten.fill(*meta_args)
self.assertEqual(ref_out.size(), meta_out.size())
self.assertEqual(ref_out.stride(), meta_out.stride())
def test_map_location_deserialize(self):
import io
t = torch.rand(10)
b = io.BytesIO()
torch.save(t, b)
b.seek(0)
r = torch.load(b, map_location=torch.device("meta"))
self.assertEqual(r.device.type, 'meta')
self.assertEqual(r.shape, t.shape)
self.assertEqual(r.dtype, t.dtype)
self.assertEqual(r.storage().data_ptr(), 0)
def test_embedding_bag_byte_prepack(self):
batch_size = 10
num_embeddings = 80
embedding_dim = [128, 256, 512]
res_shape = [[batch_size, num_embeddings, ed + 8] for ed in embedding_dim]
for ed, rs in zip(embedding_dim, res_shape):
weight = torch.randn(batch_size, num_embeddings, ed, dtype=torch.float32)
res = torch.ops.quantized.embedding_bag_byte_prepack(weight.to(device="meta"))
self.assertEqual(res.shape, rs)
self.assertEqual(res.dtype, torch.float32)
self.assertEqual(res.untyped_storage().data_ptr(), 0)
def test_embedding_bag_byte_unpack(self):
batch_size = 10
num_embeddings = 80
embedding_dim = [128, 256, 512]
res_shape = [[batch_size, num_embeddings, ed] for ed in embedding_dim]
for ed, rs in zip(embedding_dim, res_shape):
packed_weight = torch.randn(batch_size, num_embeddings, ed + 8, dtype=torch.float32)
res = torch.ops.quantized.embedding_bag_byte_unpack(packed_weight.to(device="meta"))
self.assertEqual(res.shape, rs)
self.assertEqual(res.dtype, torch.float32)
self.assertEqual(res.untyped_storage().data_ptr(), 0)
def test_index_select_out(self):
def f():
input = torch.randn([8, 16], device='meta')
index = torch.tensor([2, 1, 6, 7, 3, 1, 7, 5, 6, 7], device='meta')
out = torch.empty([10, 16], device='meta')
return torch.index_select(input=input, dim=0, index=index, out=out)
with enable_python_dispatcher():
out = f()
self.assertEqual(out.shape, [10, 16])
def test_local_scalar_dense_call(self):
with self.assertRaisesRegex(RuntimeError, "cannot be called on meta tensors"):
meta_tensor = torch.randn(1, device='meta')
meta_tensor.item()
instantiate_device_type_tests(TestMeta, globals())
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit of an edge case;
# we should still fix them, so they are left here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.count_nonzero : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.linalg.householder_product : {f64, c64, c128, f32},
torch.linalg.solve_triangular : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.matrix_exp : {f64, c128, c64, bf16, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.ormqr : {f64, c64, c128, f32},
torch.repeat_interleave : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.take : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.frexp : {f64, f16, bf16, f32},
torch.functional.unique : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.histc : {f64, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.kthvalue : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.logcumsumexp : {f64, bf16, f32, c64, c128},
torch.median : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.mode : {f64, i32, i64, f16, u8, i16, bf16, b8, i8, f32},
torch.multinomial : {f64, bf16, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f64, bf16, f32},
torch.nn.functional.max_pool3d : {f64, f32},
torch.nn.functional.max_pool3d_with_indices : {f64, f32},
torch.nn.functional.max_unpool1d : {f64, f32},
torch.nn.functional.max_unpool2d : {f64, f32},
torch.nn.functional.max_unpool3d : {f64, f32},
torch.nn.functional.multi_margin_loss : {f64, f32},
torch.nn.functional.multilabel_margin_loss : {f64, f32},
torch.nn.functional.one_hot : {i64},
torch.nn.functional.pdist : {f64, f32},
torch.polar : {f64, f32},
torch._segment_reduce : {f64, f16, bf16, f32},
torch.searchsorted : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.cholesky : {f64, f32, c128, c64},
torch.cholesky_inverse : {f64, f32, c128, c64},
torch.cholesky_solve : {f64, f32, c128, c64},
torch.linalg.eig : {f64, f32, c128, c64},
torch.linalg.eigvals : {f64, f32, c128, c64},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_only_outplace = {
torch.nn.functional.rrelu : {f64, bf16, f32},
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.functional.tensordot : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.inner : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.lu_solve : {c128, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_power : {c128, c64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.aminmax : {i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummax : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummin : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cdist : {f64, f32},
torch.nanmean : {bf16, f64, f32, f16},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.interpolate : {bf16, f64, f32, u8},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.pinv : {f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vander: {c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f64, f32},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
torch.native_batch_norm: {bf16},
torch._native_batch_norm_legit: {bf16},
torch.native_layer_norm: {bf16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
torch.histc: {i16, i32, i64, i8}, # aten::histc, aten::histc.out
torch.kthvalue: {f16}, # aten::kthvalue.values
torch.linalg.householder_product: {f32, f64}, # aten::linalg_householder_product, aten::linalg_householder_product.out
torch.linalg.solve_triangular: {f32, f64}, # aten::linalg_solve_triangular, aten::linalg_solve_triangular.out
torch.logcumsumexp: {bf16, f16}, # aten::_logcumsumexp, aten::_logcumsumexp.out
torch.matrix_exp: {f16}, # aten::linalg_matrix_exp
torch.median: {f16}, # aten::median, aten::median.dim_values
torch.multinomial: {f16}, # aten::multinomial, aten::multinomial.out
torch.nn.functional.gaussian_nll_loss: {f16}, # aten::_local_scalar_dense
torch.nn.functional.max_pool3d: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_pool3d_with_indices: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_unpool1d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool2d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool3d: {f16}, # aten::max_unpool3d
torch.nn.functional.multi_margin_loss: {bf16, f16}, # aten::multi_margin_loss
torch.nn.functional.multilabel_margin_loss: {bf16, f16}, # aten::multilabel_margin_loss_forward
torch.ormqr: {f32, f64}, # aten::ormqr, aten::ormqr.out
}
meta_function_device_expected_failures_only_outplace['cuda'] = {
torch.nn.functional.rrelu: {f16}, # aten::rrelu_with_noise
}
meta_function_device_skips['cpu'] = {
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.cummax: {f16},
torch.cummin: {f16},
torch.functional.tensordot: {f16},
torch.inner: {f16},
torch.linalg.matrix_power: {f32, f64},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f16},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.cholesky.default : {c64, c128, f64, f32},
aten.cholesky.out : {c64, c128, f64, f32},
aten.cholesky_inverse.default : {c64, c128, f64, f32},
aten.cholesky_inverse.out : {c64, c128, f64, f32},
aten.cholesky_solve.default : {c64, c128, f64, f32},
aten.cholesky_solve.out : {c64, c128, f64, f32},
aten.count_nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.count_nonzero.dim_IntList : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_eig.default : {c64, c128, f64, f32},
aten.linalg_householder_product.default : {c64, c128, f64, f32},
aten.linalg_householder_product.out : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.linalg_matrix_exp.default : {c64, bf16, f32, f64, c128},
aten.linalg_solve_triangular.default : {c64, c128, f64, f32},
aten.linalg_solve_triangular.out : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.ormqr.default : {c64, c128, f64, f32},
aten.ormqr.out : {c64, c128, f64, f32},
aten.polar.out : {f32, f64},
aten.take.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.take.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.tensordot.out : {c64, i8, f64, c128, i64, bf16, f32, i32, i16, u8},
aten.to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.default : {f32, f64}, # Shape of second output depends on data.
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._pdist_forward.default : {f32, f64},
aten._unique2.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.frexp.Tensor : {bf16, f32, f16, f64},
aten.grid_sampler_3d.default : {f32, f64},
aten.histc.default : {bf16, f32, f64},
aten.histc.out : {bf16, f32, f64},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.logcumsumexp.default : {bf16, f32, f64, c64, c128},
aten.logcumsumexp.out : {bf16, f32, f64, c64, c128},
aten.max_pool3d_with_indices.default : {f32, f64},
aten.max_unpool2d.default : {f32, f64},
aten.max_unpool3d.default : {f32, f64},
aten.median.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.median.dim : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.mode.default : {f16, i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.multi_margin_loss.default : {f32, f64},
aten.multilabel_margin_loss_forward.default : {f32, f64},
aten.multinomial.default : {bf16, f32, f64},
aten.multinomial.out : {bf16, f32, f64},
aten.nll_loss2d_forward.default : {bf16, f32, f64},
aten.polar.default : {f32, f64},
aten.rrelu_with_noise.default : {bf16, f32, f64},
aten.searchsorted.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.searchsorted.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.segment_reduce.default : {bf16, f32, f16, f64},
aten.unique_consecutive.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.unique_dim.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.aminmax.default: {i64, u8, b8, f32, i8, f64, i16, i32},
aten.cummax.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.cummin.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.linalg_lu_solve.default: {c32, c64, c128},
aten.linalg_lu_solve.out: {c32, c64, c128},
aten.linalg_pinv.atol_rtol_tensor: {f32, f64},
aten.linalg_pinv.atol_rtol_tensor_out: {f32, f64},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
aten.native_batch_norm.default: {bf16},
aten._native_batch_norm_legit.default: {bf16},
aten._native_batch_norm_legit.no_stats: {bf16},
aten.native_layer_norm.default: {bf16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.grid_sampler_3d.default: {f16}, # aten::grid_sampler_3d
aten.histc.default: {i16, i32, i64, i8}, # aten::histc
aten.histc.out: {i16, i32, i64, i8}, # aten::histc.out
aten.kthvalue.default: {f16}, # aten::kthvalue.values
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.linalg_householder_product.default: {f32, f64}, # aten::linalg_householder_product
aten.linalg_householder_product.out: {f32, f64}, # aten::linalg_householder_product.out
aten.linalg_matrix_exp.default: {f16}, # aten::linalg_matrix_exp
aten.linalg_solve_triangular.default: {f32, f64}, # aten::linalg_solve_triangular
aten.linalg_solve_triangular.out: {f32, f64}, # aten::linalg_solve_triangular.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.logcumsumexp.default: {bf16, f16}, # aten::_logcumsumexp
aten.logcumsumexp.out: {bf16, f16}, # aten::_logcumsumexp.out
aten.max_pool3d_with_indices.default: {bf16, f16}, # aten::max_pool3d_with_indices
aten.max_unpool2d.default: {f16}, # aten::max_unpool2d
aten.max_unpool3d.default: {f16}, # aten::max_unpool3d
aten.median.default: {f16}, # aten::median
aten.median.dim: {f16}, # aten::median.dim_values
aten.multi_margin_loss.default: {bf16, f16}, # aten::multi_margin_loss
aten.multilabel_margin_loss_forward.default: {bf16, f16}, # aten::multilabel_margin_loss_forward
aten.multinomial.default: {f16}, # aten::multinomial
aten.multinomial.out: {f16}, # aten::multinomial.out
aten.nll_loss2d_forward.default: {f16}, # aten::nll_loss2d_forward
aten.ormqr.default: {f32, f64}, # aten::ormqr
aten.ormqr.out: {f32, f64}, # aten::ormqr.out
aten.rrelu_with_noise.default: {f16}, # aten::rrelu_with_noise
aten.tensordot.out: {f16}, # aten::tensordot.out
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.cummax.default: {f16},
aten.cummin.default: {f16},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit of an edge case;
# we should still fix them, so they are left here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
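# repeat_interleave has a data-independent output shape only when `repeats` is a plain int;
# with a tensor `repeats` the output length depends on the values, so the meta run is expected to fail.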
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
try_resolve_aten_out_overload
|
self.test_case.precision = self.precision
self.test_case.rel_tol = self.rel_tol
|
def try_resolve_aten_out_overload(ol, args, kwargs, num_outputs):
ol_args = ol._schema.arguments
olp: OpOverloadPacket = ol._overloadpacket
if olp in MetaCrossRefDispatchMode.aten_olp_no_out_overload:
return (None, None, None)
candidate_ols = []
for candidate_ol_name in olp.overloads():
candidate_ol = getattr(olp, candidate_ol_name)
if any(arg.is_out for arg in candidate_ol._schema.arguments):
candidate_ols.append(candidate_ol)
if not candidate_ols:
MetaCrossRefDispatchMode.aten_olp_no_out_overload.add(olp)
return (None, None, None)
# Now match based on args, kwargs and number of required outputs
candidate_ol: OpOverload = None
for candidate_ol in candidate_ols:
candidate_ol_args = candidate_ol._schema.arguments
if len(args) >= len(candidate_ol_args):
continue
# Positional arguments must have the same type
if not all(
ol_args[pos_arg_ind].type == candidate_ol_args[pos_arg_ind].type
for pos_arg_ind in range(len(args))
):
continue
# Number of outputs must match
candidate_out_names = [out_arg.name for out_arg in candidate_ol_args[-num_outputs:] if out_arg.is_out]
if len(candidate_out_names) != num_outputs:
continue
# Now try and match kwargs. Just need to ensure that the
# remaining kwargs allow an out overload to be called. For example
# we can throw away parameters like `dtype` that may be passed to the
# functional version of the op since the `dtype` will already be present
# in the `out` argument
new_kwargs = {}
kwargs_match = True
for arg in candidate_ol_args[len(args):-num_outputs]:
if arg.name not in kwargs:
if arg.has_default_value():
new_kwargs[arg.name] = arg.default_value
elif isinstance(arg.type, torch.OptionalType):
if isinstance(arg.type.getElementType(), torch.BoolType):
new_kwargs[arg.name] = False
else:
new_kwargs[arg.name] = None
else:
kwargs_match = False
break
else:
new_kwargs[arg.name] = kwargs[arg.name]
if kwargs_match:
return candidate_ol, candidate_out_names, new_kwargs
return None, None, None
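# Editor's sketch (hypothetical illustration, not part of the recorded diff): how the helper
# above can be used to resolve and call the out= variant of a functional overload. Both
# torch.ops.aten.add.Tensor and torch.ops.aten.add.out exist in ATen; for this input the helper
# should return (aten.add.out, ["out"], {"alpha": 1}).
def _demo_try_resolve_out_overload():
    a, b = torch.randn(3), torch.randn(3)
    out_ol, out_names, extra_kwargs = try_resolve_aten_out_overload(
        torch.ops.aten.add.Tensor, (a, b), {}, num_outputs=1
    )
    if out_ol is None:
        return None
    result = torch.empty_like(a)
    # out_names holds the schema names of the out arguments, e.g. ["out"].
    out_ol(a, b, **extra_kwargs, **{out_names[0]: result})
    return result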
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit of an edge case;
# we should still fix them, so they are left here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
class MetaCrossRefDispatchMode(torch.utils._python_dispatch.TorchDispatchMode):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_meta.py
|
test_binary_ufuncs_mixed_dtype
|
def test_binary_ufuncs_mixed_dtype(self, device, dtype, op):
make_arg = partial(
make_tensor,
device=device,
)
def sample_input(op, device, dtype, requires_grad, **kwargs):
yield SampleInput(
make_arg((S,), dtype=dtype), make_arg((S,), dtype=torch.float16)
)
op = copy.copy(op)
op.sample_inputs_func = sample_input
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=False)
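# Editor's sketch (hypothetical illustration, not the harness used above): the idea behind the
# mixed-dtype check is to run a binary op on real tensors and on their meta copies and compare
# the promoted output dtype and shape. torch.add promotes (float32, float16) -> float32.
def _demo_mixed_dtype_meta_check(op=torch.add, dtype=torch.float32):
    x = torch.randn(5, dtype=dtype)
    y = torch.randn(5, dtype=torch.float16)
    real = op(x, y)
    meta = op(x.to("meta"), y.to("meta"))
    assert meta.device.type == "meta"
    assert meta.shape == real.shape and meta.dtype == real.dtype
    return meta.dtype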
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit of an edge case;
# we should still fix them, so they are left here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_meta.py
|
sample_input
|
def sample_input(op, device, dtype, requires_grad, **kwargs):
yield SampleInput(
make_arg((S,), dtype=dtype), make_arg((S,), dtype=torch.float16)
)
op = copy.copy(op)
op.sample_inputs_func = sample_input
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=False)
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
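A short, hedged illustration of how this regex can pull the op name out of a typical NotImplementedError message (the message text below is an assumption for demonstration, not captured from a real run):
# Illustration only; the error string is made up, not produced by this test file.
msg = "Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend."
m = RE_NOT_IMPLEMENTED_MSG.search(msg)
if m:
    print(m.group(1))  # -> aten::_local_scalar_dense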
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
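The conditional entry above pairs an op with a predicate over the sample's dtype and kwargs rather than a fixed dtype set. A minimal, hedged sketch of how such a predicate could be consulted (the helper name is hypothetical and not part of this file):
# Hypothetical helper, for illustration only.
def _is_conditional_xfail(func, dtype, *args, **kwargs):
    predicate = meta_function_expected_failures_conditional.get(func)
    return predicate is not None and predicate(dtype, *args, **kwargs)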
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_meta.py
|
test_nan_to_num
|
def test_nan_to_num(self):
t = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14], device='meta')
r = t.nan_to_num()
self.assertEqual(r.device.type, 'meta')
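The test above relies on meta tensors propagating shape and dtype without allocating data; a small hedged illustration (not part of the test file):
# Illustration only: ops on meta tensors return meta outputs with the right metadata.
t = torch.empty(4, 2, device='meta', dtype=torch.float16)
r = t.nan_to_num()
assert r.device.type == 'meta' and r.shape == (4, 2) and r.dtype == torch.float16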
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_meta.py
|
test_inplace_masked_fill_error
|
def test_inplace_masked_fill_error(self):
t = torch.randn(3, 3, device='meta')
with self.assertRaisesRegex(RuntimeError, "doesn't match the broadcast"):
t.masked_fill_((t > 0).unsqueeze(0), 0.1)
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_meta.py
|
test_meta_autograd_no_error
|
def test_meta_autograd_no_error(self):
lib = torch.library.Library("meta_test", "DEF")
impl_cpu = torch.library.Library("meta_test", "IMPL", "CPU")
impl_meta = torch.library.Library("meta_test", "IMPL", "Meta")
def foo_impl(x):
return x + 1
lib.define("foo(Tensor a) -> Tensor")
impl_meta.impl("foo", foo_impl)
impl_cpu.impl("foo", foo_impl)
a = torch.ones(2, device='meta')
# The point of the test is that this should not error:
# We have a fallthrough kernel registered to the AutogradMeta
# key for custom ops, so it's fine that `foo()` doesn't have
# an autograd kernel.
b = torch.ops.meta_test.foo.default(a)
del impl_meta
del impl_cpu
del lib
|
def test_meta_autograd_no_error(self):
with torch.library._scoped_library("meta_test", "DEF") as lib:
with torch.library._scoped_library("meta_test", "IMPL", "CPU") as impl_cpu:
with torch.library._scoped_library("meta_test", "IMPL", "Meta") as impl_meta:
def foo_impl(x):
return x + 1
lib.define("foo(Tensor a) -> Tensor")
impl_meta.impl("foo", foo_impl)
impl_cpu.impl("foo", foo_impl)
a = torch.ones(2, device='meta')
# The point of the test is that this should not error:
# We have a fallthrough kernel registered to the AutogradMeta
# key for custom ops, so it's fine that `foo()` doesn't have
# an autograd kernel.
b = torch.ops.meta_test.foo.default(a)
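The scoped-library form above nests three context managers so the test registrations are torn down automatically; a hedged sketch of the same pattern written as a single with-statement (equivalent behavior, not the file's actual code):
# Sketch only: same registrations, one with-statement using line continuation.
with torch.library._scoped_library("meta_test", "DEF") as lib, \
     torch.library._scoped_library("meta_test", "IMPL", "CPU") as impl_cpu, \
     torch.library._scoped_library("meta_test", "IMPL", "Meta") as impl_meta:
    lib.define("foo(Tensor a) -> Tensor")
    impl_meta.impl("foo", lambda x: x + 1)
    impl_cpu.impl("foo", lambda x: x + 1)
    torch.ops.meta_test.foo.default(torch.ones(2, device='meta'))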
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.count_nonzero : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.linalg.householder_product : {f64, c64, c128, f32},
torch.linalg.solve_triangular : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.matrix_exp : {f64, c128, c64, bf16, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.ormqr : {f64, c64, c128, f32},
torch.repeat_interleave : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.take : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.frexp : {f64, f16, bf16, f32},
torch.functional.unique : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.histc : {f64, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.kthvalue : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.logcumsumexp : {f64, bf16, f32, c64, c128},
torch.median : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.mode : {f64, i32, i64, f16, u8, i16, bf16, b8, i8, f32},
torch.multinomial : {f64, bf16, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f64, bf16, f32},
torch.nn.functional.max_pool3d : {f64, f32},
torch.nn.functional.max_pool3d_with_indices : {f64, f32},
torch.nn.functional.max_unpool1d : {f64, f32},
torch.nn.functional.max_unpool2d : {f64, f32},
torch.nn.functional.max_unpool3d : {f64, f32},
torch.nn.functional.multi_margin_loss : {f64, f32},
torch.nn.functional.multilabel_margin_loss : {f64, f32},
torch.nn.functional.one_hot : {i64},
torch.nn.functional.pdist : {f64, f32},
torch.polar : {f64, f32},
torch._segment_reduce : {f64, f16, bf16, f32},
torch.searchsorted : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.cholesky : {f64, f32, c128, c64},
torch.cholesky_inverse : {f64, f32, c128, c64},
torch.cholesky_solve : {f64, f32, c128, c64},
torch.linalg.eig : {f64, f32, c128, c64},
torch.linalg.eigvals : {f64, f32, c128, c64},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_only_outplace = {
torch.nn.functional.rrelu : {f64, bf16, f32},
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.functional.tensordot : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.inner : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.lu_solve : {c128, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_power : {c128, c64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.aminmax : {i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummax : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummin : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cdist : {f64, f32},
torch.nanmean : {bf16, f64, f32, f16},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.interpolate : {bf16, f64, f32, u8},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.pinv : {f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vander: {c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f64, f32},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
torch.native_batch_norm: {bf16},
torch._native_batch_norm_legit: {bf16},
torch.native_layer_norm: {bf16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
torch.histc: {i16, i32, i64, i8}, # aten::histc, aten::histc.out
torch.kthvalue: {f16}, # aten::kthvalue.values
torch.linalg.householder_product: {f32, f64}, # aten::linalg_householder_product, aten::linalg_householder_product.out
torch.linalg.solve_triangular: {f32, f64}, # aten::linalg_solve_triangular, aten::linalg_solve_triangular.out
torch.logcumsumexp: {bf16, f16}, # aten::_logcumsumexp, aten::_logcumsumexp.out
torch.matrix_exp: {f16}, # aten::linalg_matrix_exp
torch.median: {f16}, # aten::median, aten::median.dim_values
torch.multinomial: {f16}, # aten::multinomial, aten::multinomial.out
torch.nn.functional.gaussian_nll_loss: {f16}, # aten::_local_scalar_dense
torch.nn.functional.max_pool3d: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_pool3d_with_indices: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_unpool1d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool2d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool3d: {f16}, # aten::max_unpool3d
torch.nn.functional.multi_margin_loss: {bf16, f16}, # aten::multi_margin_loss
torch.nn.functional.multilabel_margin_loss: {bf16, f16}, # aten::multilabel_margin_loss_forward
torch.ormqr: {f32, f64}, # aten::ormqr, aten::ormqr.out
}
meta_function_device_expected_failures_only_outplace['cuda'] = {
torch.nn.functional.rrelu: {f16}, # aten::rrelu_with_noise
}
meta_function_device_skips['cpu'] = {
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.cummax: {f16},
torch.cummin: {f16},
torch.functional.tensordot: {f16},
torch.inner: {f16},
torch.linalg.matrix_power: {f32, f64},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f16},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.cholesky.default : {c64, c128, f64, f32},
aten.cholesky.out : {c64, c128, f64, f32},
aten.cholesky_inverse.default : {c64, c128, f64, f32},
aten.cholesky_inverse.out : {c64, c128, f64, f32},
aten.cholesky_solve.default : {c64, c128, f64, f32},
aten.cholesky_solve.out : {c64, c128, f64, f32},
aten.count_nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.count_nonzero.dim_IntList : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_eig.default : {c64, c128, f64, f32},
aten.linalg_householder_product.default : {c64, c128, f64, f32},
aten.linalg_householder_product.out : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.linalg_matrix_exp.default : {c64, bf16, f32, f64, c128},
aten.linalg_solve_triangular.default : {c64, c128, f64, f32},
aten.linalg_solve_triangular.out : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.ormqr.default : {c64, c128, f64, f32},
aten.ormqr.out : {c64, c128, f64, f32},
aten.polar.out : {f32, f64},
aten.take.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.take.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.tensordot.out : {c64, i8, f64, c128, i64, bf16, f32, i32, i16, u8},
aten.to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.default : {f32, f64}, # Shape of second output depends on data.
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._pdist_forward.default : {f32, f64},
aten._unique2.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.frexp.Tensor : {bf16, f32, f16, f64},
aten.grid_sampler_3d.default : {f32, f64},
aten.histc.default : {bf16, f32, f64},
aten.histc.out : {bf16, f32, f64},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.logcumsumexp.default : {bf16, f32, f64, c64, c128},
aten.logcumsumexp.out : {bf16, f32, f64, c64, c128},
aten.max_pool3d_with_indices.default : {f32, f64},
aten.max_unpool2d.default : {f32, f64},
aten.max_unpool3d.default : {f32, f64},
aten.median.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.median.dim : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.mode.default : {f16, i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.multi_margin_loss.default : {f32, f64},
aten.multilabel_margin_loss_forward.default : {f32, f64},
aten.multinomial.default : {bf16, f32, f64},
aten.multinomial.out : {bf16, f32, f64},
aten.nll_loss2d_forward.default : {bf16, f32, f64},
aten.polar.default : {f32, f64},
aten.rrelu_with_noise.default : {bf16, f32, f64},
aten.searchsorted.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.searchsorted.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.segment_reduce.default : {bf16, f32, f16, f64},
aten.unique_consecutive.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.unique_dim.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.aminmax.default: {i64, u8, b8, f32, i8, f64, i16, i32},
aten.cummax.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.cummin.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.linalg_lu_solve.default: {c32, c64, c128},
aten.linalg_lu_solve.out: {c32, c64, c128},
aten.linalg_pinv.atol_rtol_tensor: {f32, f64},
aten.linalg_pinv.atol_rtol_tensor_out: {f32, f64},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
aten.native_batch_norm.default: {bf16},
aten._native_batch_norm_legit.default: {bf16},
aten._native_batch_norm_legit.no_stats: {bf16},
aten.native_layer_norm.default: {bf16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.grid_sampler_3d.default: {f16}, # aten::grid_sampler_3d
aten.histc.default: {i16, i32, i64, i8}, # aten::histc
aten.histc.out: {i16, i32, i64, i8}, # aten::histc.out
aten.kthvalue.default: {f16}, # aten::kthvalue.values
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.linalg_householder_product.default: {f32, f64}, # aten::linalg_householder_product
aten.linalg_householder_product.out: {f32, f64}, # aten::linalg_householder_product.out
aten.linalg_matrix_exp.default: {f16}, # aten::linalg_matrix_exp
aten.linalg_solve_triangular.default: {f32, f64}, # aten::linalg_solve_triangular
aten.linalg_solve_triangular.out: {f32, f64}, # aten::linalg_solve_triangular.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.logcumsumexp.default: {bf16, f16}, # aten::_logcumsumexp
aten.logcumsumexp.out: {bf16, f16}, # aten::_logcumsumexp.out
aten.max_pool3d_with_indices.default: {bf16, f16}, # aten::max_pool3d_with_indices
aten.max_unpool2d.default: {f16}, # aten::max_unpool2d
aten.max_unpool3d.default: {f16}, # aten::max_unpool3d
aten.median.default: {f16}, # aten::median
aten.median.dim: {f16}, # aten::median.dim_values
aten.multi_margin_loss.default: {bf16, f16}, # aten::multi_margin_loss
aten.multilabel_margin_loss_forward.default: {bf16, f16}, # aten::multilabel_margin_loss_forward
aten.multinomial.default: {f16}, # aten::multinomial
aten.multinomial.out: {f16}, # aten::multinomial.out
aten.nll_loss2d_forward.default: {f16}, # aten::nll_loss2d_forward
aten.ormqr.default: {f32, f64}, # aten::ormqr
aten.ormqr.out: {f32, f64}, # aten::ormqr.out
aten.rrelu_with_noise.default: {f16}, # aten::rrelu_with_noise
aten.tensordot.out: {f16}, # aten::tensordot.out
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.cummax.default: {f16},
aten.cummin.default: {f16},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
class TestMeta(TestCase):
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey;
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
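# Editor's illustration (not part of the original file): RE_NOT_IMPLEMENTED_MSG presumably
# extracts the op name from the dispatcher's NotImplementedError text; the message below is
# an assumed, typical example of that format, used only to demonstrate the regex.
_example_msg = (
    "Could not run 'aten::histc' with arguments from the 'Meta' backend. "
    "This could be because the operator doesn't exist for this backend."
)
_match = RE_NOT_IMPLEMENTED_MSG.search(_example_msg)
assert _match is not None and _match.group(1) == "aten::histc"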
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
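# Editor's sketch (hypothetical helper, not part of the original file): one way the
# conditional table above could be consulted for a given function, dtype, and sample
# kwargs, falling back to the unconditional meta_function_expected_failures table.
def _is_expected_failure(func, dtype, *args, **kwargs):
    if func in meta_function_expected_failures_conditional:
        return meta_function_expected_failures_conditional[func](dtype, *args, **kwargs)
    return dtype in meta_function_expected_failures.get(func, set())

# For example, repeat_interleave with a Tensor `repeats` counts as an expected failure,
# while an int `repeats` does not:
#   _is_expected_failure(torch.repeat_interleave, f32, repeats=torch.tensor([1, 2]))  # True
#   _is_expected_failure(torch.repeat_interleave, f32, repeats=2)                     # False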
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
foo_impl
|
def foo_impl(x):
return x + 1
lib.define("foo(Tensor a) -> Tensor")
impl_meta.impl("foo", foo_impl)
impl_cpu.impl("foo", foo_impl)
a = torch.ones(2, device='meta')
# The point of the test is that this should not error:
# We have a fallthrough kernel registered to the AutogradMeta
# key for custom ops, so it's fine that `foo()` doesn't have
# an autograd kernel.
b = torch.ops.meta_test.foo.default(a)
del impl_meta
del impl_cpu
del lib
|
def foo_impl(x):
return x + 1
lib.define("foo(Tensor a) -> Tensor")
impl_meta.impl("foo", foo_impl)
impl_cpu.impl("foo", foo_impl)
a = torch.ones(2, device='meta')
# The point of the test is that this should not error:
# We have a fallthrough kernel registered to the AutogradMeta
# key for custom ops, so it's fine that `foo()` doesn't have
# an autograd kernel.
b = torch.ops.meta_test.foo.default(a)
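# Editor's aside (a minimal sketch under stated assumptions, not shown in this record):
# the snippet above uses `lib`, `impl_meta`, and `impl_cpu` without showing how they are
# built. With the torch.library.Library API and the "meta_test" namespace implied by
# torch.ops.meta_test.foo, they would typically be created like this:
import torch

lib = torch.library.Library("meta_test", "DEF")                  # defines the custom-op namespace
impl_meta = torch.library.Library("meta_test", "IMPL", "Meta")   # kernels for the Meta dispatch key
impl_cpu = torch.library.Library("meta_test", "IMPL", "CPU")     # kernels for the CPU dispatch key
# The lib.define(...) and impl.impl(...) calls in the snippet then register the schema
# and the per-backend kernels behind torch.ops.meta_test.foo.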
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey;
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.count_nonzero : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.linalg.householder_product : {f64, c64, c128, f32},
torch.linalg.solve_triangular : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.matrix_exp : {f64, c128, c64, bf16, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.ormqr : {f64, c64, c128, f32},
torch.repeat_interleave : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.take : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.frexp : {f64, f16, bf16, f32},
torch.functional.unique : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.histc : {f64, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.kthvalue : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.logcumsumexp : {f64, bf16, f32, c64, c128},
torch.median : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.mode : {f64, i32, i64, f16, u8, i16, bf16, b8, i8, f32},
torch.multinomial : {f64, bf16, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f64, bf16, f32},
torch.nn.functional.max_pool3d : {f64, f32},
torch.nn.functional.max_pool3d_with_indices : {f64, f32},
torch.nn.functional.max_unpool1d : {f64, f32},
torch.nn.functional.max_unpool2d : {f64, f32},
torch.nn.functional.max_unpool3d : {f64, f32},
torch.nn.functional.multi_margin_loss : {f64, f32},
torch.nn.functional.multilabel_margin_loss : {f64, f32},
torch.nn.functional.one_hot : {i64},
torch.nn.functional.pdist : {f64, f32},
torch.polar : {f64, f32},
torch._segment_reduce : {f64, f16, bf16, f32},
torch.searchsorted : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.cholesky : {f64, f32, c128, c64},
torch.cholesky_inverse : {f64, f32, c128, c64},
torch.cholesky_solve : {f64, f32, c128, c64},
torch.linalg.eig : {f64, f32, c128, c64},
torch.linalg.eigvals : {f64, f32, c128, c64},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_only_outplace = {
torch.nn.functional.rrelu : {f64, bf16, f32},
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.functional.tensordot : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.inner : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.lu_solve : {c128, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_power : {c128, c64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.aminmax : {i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummax : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummin : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cdist : {f64, f32},
torch.nanmean : {bf16, f64, f32, f16},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.interpolate : {bf16, f64, f32, u8},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.pinv : {f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vander: {c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f64, f32},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
torch.native_batch_norm: {bf16},
torch._native_batch_norm_legit: {bf16},
torch.native_layer_norm: {bf16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
torch.histc: {i16, i32, i64, i8}, # aten::histc, aten::histc.out
torch.kthvalue: {f16}, # aten::kthvalue.values
torch.linalg.householder_product: {f32, f64}, # aten::linalg_householder_product, aten::linalg_householder_product.out
torch.linalg.solve_triangular: {f32, f64}, # aten::linalg_solve_triangular, aten::linalg_solve_triangular.out
torch.logcumsumexp: {bf16, f16}, # aten::_logcumsumexp, aten::_logcumsumexp.out
torch.matrix_exp: {f16}, # aten::linalg_matrix_exp
torch.median: {f16}, # aten::median, aten::median.dim_values
torch.multinomial: {f16}, # aten::multinomial, aten::multinomial.out
torch.nn.functional.gaussian_nll_loss: {f16}, # aten::_local_scalar_dense
torch.nn.functional.max_pool3d: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_pool3d_with_indices: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_unpool1d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool2d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool3d: {f16}, # aten::max_unpool3d
torch.nn.functional.multi_margin_loss: {bf16, f16}, # aten::multi_margin_loss
torch.nn.functional.multilabel_margin_loss: {bf16, f16}, # aten::multilabel_margin_loss_forward
torch.ormqr: {f32, f64}, # aten::ormqr, aten::ormqr.out
}
meta_function_device_expected_failures_only_outplace['cuda'] = {
torch.nn.functional.rrelu: {f16}, # aten::rrelu_with_noise
}
meta_function_device_skips['cpu'] = {
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.cummax: {f16},
torch.cummin: {f16},
torch.functional.tensordot: {f16},
torch.inner: {f16},
torch.linalg.matrix_power: {f32, f64},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f16},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.cholesky.default : {c64, c128, f64, f32},
aten.cholesky.out : {c64, c128, f64, f32},
aten.cholesky_inverse.default : {c64, c128, f64, f32},
aten.cholesky_inverse.out : {c64, c128, f64, f32},
aten.cholesky_solve.default : {c64, c128, f64, f32},
aten.cholesky_solve.out : {c64, c128, f64, f32},
aten.count_nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.count_nonzero.dim_IntList : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_eig.default : {c64, c128, f64, f32},
aten.linalg_householder_product.default : {c64, c128, f64, f32},
aten.linalg_householder_product.out : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.linalg_matrix_exp.default : {c64, bf16, f32, f64, c128},
aten.linalg_solve_triangular.default : {c64, c128, f64, f32},
aten.linalg_solve_triangular.out : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.ormqr.default : {c64, c128, f64, f32},
aten.ormqr.out : {c64, c128, f64, f32},
aten.polar.out : {f32, f64},
aten.take.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.take.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.tensordot.out : {c64, i8, f64, c128, i64, bf16, f32, i32, i16, u8},
aten.to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.default : {f32, f64}, # Shape of second output depends on data.
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._pdist_forward.default : {f32, f64},
aten._unique2.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.frexp.Tensor : {bf16, f32, f16, f64},
aten.grid_sampler_3d.default : {f32, f64},
aten.histc.default : {bf16, f32, f64},
aten.histc.out : {bf16, f32, f64},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.logcumsumexp.default : {bf16, f32, f64, c64, c128},
aten.logcumsumexp.out : {bf16, f32, f64, c64, c128},
aten.max_pool3d_with_indices.default : {f32, f64},
aten.max_unpool2d.default : {f32, f64},
aten.max_unpool3d.default : {f32, f64},
aten.median.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.median.dim : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.mode.default : {f16, i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.multi_margin_loss.default : {f32, f64},
aten.multilabel_margin_loss_forward.default : {f32, f64},
aten.multinomial.default : {bf16, f32, f64},
aten.multinomial.out : {bf16, f32, f64},
aten.nll_loss2d_forward.default : {bf16, f32, f64},
aten.polar.default : {f32, f64},
aten.rrelu_with_noise.default : {bf16, f32, f64},
aten.searchsorted.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.searchsorted.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.segment_reduce.default : {bf16, f32, f16, f64},
aten.unique_consecutive.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.unique_dim.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.aminmax.default: {i64, u8, b8, f32, i8, f64, i16, i32},
aten.cummax.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.cummin.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.linalg_lu_solve.default: {c32, c64, c128},
aten.linalg_lu_solve.out: {c32, c64, c128},
aten.linalg_pinv.atol_rtol_tensor: {f32, f64},
aten.linalg_pinv.atol_rtol_tensor_out: {f32, f64},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
aten.native_batch_norm.default: {bf16},
aten._native_batch_norm_legit.default: {bf16},
aten._native_batch_norm_legit.no_stats: {bf16},
aten.native_layer_norm.default: {bf16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.grid_sampler_3d.default: {f16}, # aten::grid_sampler_3d
aten.histc.default: {i16, i32, i64, i8}, # aten::histc
aten.histc.out: {i16, i32, i64, i8}, # aten::histc.out
aten.kthvalue.default: {f16}, # aten::kthvalue.values
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.linalg_householder_product.default: {f32, f64}, # aten::linalg_householder_product
aten.linalg_householder_product.out: {f32, f64}, # aten::linalg_householder_product.out
aten.linalg_matrix_exp.default: {f16}, # aten::linalg_matrix_exp
aten.linalg_solve_triangular.default: {f32, f64}, # aten::linalg_solve_triangular
aten.linalg_solve_triangular.out: {f32, f64}, # aten::linalg_solve_triangular.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.logcumsumexp.default: {bf16, f16}, # aten::_logcumsumexp
aten.logcumsumexp.out: {bf16, f16}, # aten::_logcumsumexp.out
aten.max_pool3d_with_indices.default: {bf16, f16}, # aten::max_pool3d_with_indices
aten.max_unpool2d.default: {f16}, # aten::max_unpool2d
aten.max_unpool3d.default: {f16}, # aten::max_unpool3d
aten.median.default: {f16}, # aten::median
aten.median.dim: {f16}, # aten::median.dim_values
aten.multi_margin_loss.default: {bf16, f16}, # aten::multi_margin_loss
aten.multilabel_margin_loss_forward.default: {bf16, f16}, # aten::multilabel_margin_loss_forward
aten.multinomial.default: {f16}, # aten::multinomial
aten.multinomial.out: {f16}, # aten::multinomial.out
aten.nll_loss2d_forward.default: {f16}, # aten::nll_loss2d_forward
aten.ormqr.default: {f32, f64}, # aten::ormqr
aten.ormqr.out: {f32, f64}, # aten::ormqr.out
aten.rrelu_with_noise.default: {f16}, # aten::rrelu_with_noise
aten.tensordot.out: {f16}, # aten::tensordot.out
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.cummax.default: {f16},
aten.cummin.default: {f16},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey;
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
foo_impl
|
def foo_impl(x):
return x + 1
lib.define("foo(Tensor a) -> Tensor")
impl_meta.impl("foo", foo_impl)
impl_cpu.impl("foo", foo_impl)
a = torch.ones(2, device='meta')
# The point of the test is that this should not error:
# We have a fallthrough kernel registered to the AutogradMeta
# key for custom ops, so it's fine that `foo()` doesn't have
# an autograd kernel.
b = torch.ops.meta_test.foo.default(a)
del impl_meta
del impl_cpu
del lib
|
def foo_impl(x):
return x + 1
lib.define("foo(Tensor a) -> Tensor")
impl_meta.impl("foo", foo_impl)
impl_cpu.impl("foo", foo_impl)
a = torch.ones(2, device='meta')
# The point of the test is that this should not error:
# We have a fallthrough kernel registered to the AutogradMeta
# key for custom ops, so it's fine that `foo()` doesn't have
# an autograd kernel.
b = torch.ops.meta_test.foo.default(a)
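# --- Hedged sketch (not part of the original record): one way the `lib`, `impl_cpu` and
# `impl_meta` handles used above could be constructed with torch.library.Library. The
# namespace "meta_test" matches the op call above; everything else here is an assumption.
import torch

lib = torch.library.Library("meta_test", "DEF")                  # owns schema definitions
impl_cpu = torch.library.Library("meta_test", "IMPL", "CPU")     # CPU kernel registrations
impl_meta = torch.library.Library("meta_test", "IMPL", "Meta")   # Meta (shape-only) registrations

def foo_impl(x):
    return x + 1

lib.define("foo(Tensor a) -> Tensor")
impl_meta.impl("foo", foo_impl)
impl_cpu.impl("foo", foo_impl)

# Dispatching on a meta tensor should succeed even though no autograd kernel was
# registered, thanks to the fallthrough on the AutogradMeta key for custom ops.
a = torch.ones(2, device="meta")
b = torch.ops.meta_test.foo.default(a)
assert b.device.type == "meta" and tuple(b.shape) == (2,)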
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
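# Hedged sketch (the helper below is hypothetical, not part of the original file): how the
# sets above could drive a per-op stride-checking policy -- skip ops with known stride
# divergences, check every stride for ops in CHECK_ALL_STRIDES, and otherwise compare only
# the significant strides.
def stride_check_level(func):
    if func in CHECK_STRIDES_SKIPS:
        return "none"          # known stride mismatches between meta and real kernels
    if func in CHECK_ALL_STRIDES:
        return "all"           # compare every stride
    if func in CHECK_STRIDES or isinstance(func, torch._ops.OpOverload):
        return "significant"   # compare only strides that affect memory layout
    return "none"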
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
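# Hedged illustration (the message string and the bookkeeping below are assumptions): the
# regex above pulls the missing ATen op out of the dispatcher's NotImplementedError text so
# the harness can record *why* a meta run failed.
example_msg = "Could not run 'aten::nonzero' with arguments from the 'Meta' backend."
match = RE_NOT_IMPLEMENTED_MSG.search(example_msg)
if match:
    failed_reasons["torch.nonzero"].add(match.group(1))  # group(1) == "aten::nonzero"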
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.count_nonzero : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.linalg.householder_product : {f64, c64, c128, f32},
torch.linalg.solve_triangular : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.matrix_exp : {f64, c128, c64, bf16, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.ormqr : {f64, c64, c128, f32},
torch.repeat_interleave : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.take : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.frexp : {f64, f16, bf16, f32},
torch.functional.unique : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.histc : {f64, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.kthvalue : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.logcumsumexp : {f64, bf16, f32, c64, c128},
torch.median : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.mode : {f64, i32, i64, f16, u8, i16, bf16, b8, i8, f32},
torch.multinomial : {f64, bf16, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f64, bf16, f32},
torch.nn.functional.max_pool3d : {f64, f32},
torch.nn.functional.max_pool3d_with_indices : {f64, f32},
torch.nn.functional.max_unpool1d : {f64, f32},
torch.nn.functional.max_unpool2d : {f64, f32},
torch.nn.functional.max_unpool3d : {f64, f32},
torch.nn.functional.multi_margin_loss : {f64, f32},
torch.nn.functional.multilabel_margin_loss : {f64, f32},
torch.nn.functional.one_hot : {i64},
torch.nn.functional.pdist : {f64, f32},
torch.polar : {f64, f32},
torch._segment_reduce : {f64, f16, bf16, f32},
torch.searchsorted : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.cholesky : {f64, f32, c128, c64},
torch.cholesky_inverse : {f64, f32, c128, c64},
torch.cholesky_solve : {f64, f32, c128, c64},
torch.linalg.eig : {f64, f32, c128, c64},
torch.linalg.eigvals : {f64, f32, c128, c64},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_only_outplace = {
torch.nn.functional.rrelu : {f64, bf16, f32},
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.functional.tensordot : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.inner : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.lu_solve : {c128, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_power : {c128, c64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.aminmax : {i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummax : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummin : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cdist : {f64, f32},
torch.nanmean : {bf16, f64, f32, f16},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.interpolate : {bf16, f64, f32, u8},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.pinv : {f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vander: {c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f64, f32},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
torch.native_batch_norm: {bf16},
torch._native_batch_norm_legit: {bf16},
torch.native_layer_norm: {bf16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
torch.histc: {i16, i32, i64, i8}, # aten::histc, aten::histc.out
torch.kthvalue: {f16}, # aten::kthvalue.values
torch.linalg.householder_product: {f32, f64}, # aten::linalg_householder_product, aten::linalg_householder_product.out
torch.linalg.solve_triangular: {f32, f64}, # aten::linalg_solve_triangular, aten::linalg_solve_triangular.out
torch.logcumsumexp: {bf16, f16}, # aten::_logcumsumexp, aten::_logcumsumexp.out
torch.matrix_exp: {f16}, # aten::linalg_matrix_exp
torch.median: {f16}, # aten::median, aten::median.dim_values
torch.multinomial: {f16}, # aten::multinomial, aten::multinomial.out
torch.nn.functional.gaussian_nll_loss: {f16}, # aten::_local_scalar_dense
torch.nn.functional.max_pool3d: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_pool3d_with_indices: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_unpool1d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool2d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool3d: {f16}, # aten::max_unpool3d
torch.nn.functional.multi_margin_loss: {bf16, f16}, # aten::multi_margin_loss
torch.nn.functional.multilabel_margin_loss: {bf16, f16}, # aten::multilabel_margin_loss_forward
torch.ormqr: {f32, f64}, # aten::ormqr, aten::ormqr.out
}
meta_function_device_expected_failures_only_outplace['cuda'] = {
torch.nn.functional.rrelu: {f16}, # aten::rrelu_with_noise
}
meta_function_device_skips['cpu'] = {
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.cummax: {f16},
torch.cummin: {f16},
torch.functional.tensordot: {f16},
torch.inner: {f16},
torch.linalg.matrix_power: {f32, f64},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f16},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.cholesky.default : {c64, c128, f64, f32},
aten.cholesky.out : {c64, c128, f64, f32},
aten.cholesky_inverse.default : {c64, c128, f64, f32},
aten.cholesky_inverse.out : {c64, c128, f64, f32},
aten.cholesky_solve.default : {c64, c128, f64, f32},
aten.cholesky_solve.out : {c64, c128, f64, f32},
aten.count_nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.count_nonzero.dim_IntList : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_eig.default : {c64, c128, f64, f32},
aten.linalg_householder_product.default : {c64, c128, f64, f32},
aten.linalg_householder_product.out : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.linalg_matrix_exp.default : {c64, bf16, f32, f64, c128},
aten.linalg_solve_triangular.default : {c64, c128, f64, f32},
aten.linalg_solve_triangular.out : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.ormqr.default : {c64, c128, f64, f32},
aten.ormqr.out : {c64, c128, f64, f32},
aten.polar.out : {f32, f64},
aten.take.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.take.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.tensordot.out : {c64, i8, f64, c128, i64, bf16, f32, i32, i16, u8},
aten.to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.default : {f32, f64}, # Shape of second output depends on data.
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._pdist_forward.default : {f32, f64},
aten._unique2.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.frexp.Tensor : {bf16, f32, f16, f64},
aten.grid_sampler_3d.default : {f32, f64},
aten.histc.default : {bf16, f32, f64},
aten.histc.out : {bf16, f32, f64},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.logcumsumexp.default : {bf16, f32, f64, c64, c128},
aten.logcumsumexp.out : {bf16, f32, f64, c64, c128},
aten.max_pool3d_with_indices.default : {f32, f64},
aten.max_unpool2d.default : {f32, f64},
aten.max_unpool3d.default : {f32, f64},
aten.median.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.median.dim : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.mode.default : {f16, i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.multi_margin_loss.default : {f32, f64},
aten.multilabel_margin_loss_forward.default : {f32, f64},
aten.multinomial.default : {bf16, f32, f64},
aten.multinomial.out : {bf16, f32, f64},
aten.nll_loss2d_forward.default : {bf16, f32, f64},
aten.polar.default : {f32, f64},
aten.rrelu_with_noise.default : {bf16, f32, f64},
aten.searchsorted.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.searchsorted.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.segment_reduce.default : {bf16, f32, f16, f64},
aten.unique_consecutive.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.unique_dim.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.aminmax.default: {i64, u8, b8, f32, i8, f64, i16, i32},
aten.cummax.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.cummin.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.linalg_lu_solve.default: {c32, c64, c128},
aten.linalg_lu_solve.out: {c32, c64, c128},
aten.linalg_pinv.atol_rtol_tensor: {f32, f64},
aten.linalg_pinv.atol_rtol_tensor_out: {f32, f64},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
aten.native_batch_norm.default: {bf16},
aten._native_batch_norm_legit.default: {bf16},
aten._native_batch_norm_legit.no_stats: {bf16},
aten.native_layer_norm.default: {bf16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.grid_sampler_3d.default: {f16}, # aten::grid_sampler_3d
aten.histc.default: {i16, i32, i64, i8}, # aten::histc
aten.histc.out: {i16, i32, i64, i8}, # aten::histc.out
aten.kthvalue.default: {f16}, # aten::kthvalue.values
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.linalg_householder_product.default: {f32, f64}, # aten::linalg_householder_product
aten.linalg_householder_product.out: {f32, f64}, # aten::linalg_householder_product.out
aten.linalg_matrix_exp.default: {f16}, # aten::linalg_matrix_exp
aten.linalg_solve_triangular.default: {f32, f64}, # aten::linalg_solve_triangular
aten.linalg_solve_triangular.out: {f32, f64}, # aten::linalg_solve_triangular.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.logcumsumexp.default: {bf16, f16}, # aten::_logcumsumexp
aten.logcumsumexp.out: {bf16, f16}, # aten::_logcumsumexp.out
aten.max_pool3d_with_indices.default: {bf16, f16}, # aten::max_pool3d_with_indices
aten.max_unpool2d.default: {f16}, # aten::max_unpool2d
aten.max_unpool3d.default: {f16}, # aten::max_unpool3d
aten.median.default: {f16}, # aten::median
aten.median.dim: {f16}, # aten::median.dim_values
aten.multi_margin_loss.default: {bf16, f16}, # aten::multi_margin_loss
aten.multilabel_margin_loss_forward.default: {bf16, f16}, # aten::multilabel_margin_loss_forward
aten.multinomial.default: {f16}, # aten::multinomial
aten.multinomial.out: {f16}, # aten::multinomial.out
aten.nll_loss2d_forward.default: {f16}, # aten::nll_loss2d_forward
aten.ormqr.default: {f32, f64}, # aten::ormqr
aten.ormqr.out: {f32, f64}, # aten::ormqr.out
aten.rrelu_with_noise.default: {f16}, # aten::rrelu_with_noise
aten.tensordot.out: {f16}, # aten::tensordot.out
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.cummax.default: {f16},
aten.cummin.default: {f16},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
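# Hedged sketch (the helper name `expected_to_fail` is hypothetical): a conditional entry is a
# predicate over (dtype, *args, **kwargs) so an op can be marked as an expected failure only
# for some samples -- e.g. torch.repeat_interleave only when `repeats` is not a plain int.
def expected_to_fail(func, dtype, *args, **kwargs):
    if dtype in meta_function_expected_failures.get(func, set()):
        return True
    cond = meta_function_expected_failures_conditional.get(func)
    return cond is not None and cond(dtype, *args, **kwargs)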
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype, this will fail. CPU execution may also produce
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
_fn
|
def _fn(t, *args, **kwargs):
return inplace_variant(t.clone(), *args, **kwargs)
return _fn
|
def _fn(t, *args, **kwargs):
if isinstance(t, list):
return inplace_variant([x.clone() for x in t], *args, **kwargs)
else:
return inplace_variant(t.clone(), *args, **kwargs)
return _fn
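# Hedged usage sketch (the factory name `_make_out_of_place` is hypothetical; its body mirrors
# the wrapper above). Cloning first lets an in-place variant run without mutating the caller's
# tensors, and the list branch covers foreach-style ops whose first argument is a tensor list.
import torch

def _make_out_of_place(inplace_variant):
    def _fn(t, *args, **kwargs):
        if isinstance(t, list):
            return inplace_variant([x.clone() for x in t], *args, **kwargs)
        return inplace_variant(t.clone(), *args, **kwargs)
    return _fn

add_one = _make_out_of_place(torch.Tensor.add_)
x = torch.zeros(3)
y = add_one(x, 1)
assert torch.equal(x, torch.zeros(3))  # the original input is untouched
assert torch.equal(y, torch.ones(3))   # the clone carries the in-place result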
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.count_nonzero : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.linalg.householder_product : {f64, c64, c128, f32},
torch.linalg.solve_triangular : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.matrix_exp : {f64, c128, c64, bf16, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.ormqr : {f64, c64, c128, f32},
torch.repeat_interleave : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.take : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.frexp : {f64, f16, bf16, f32},
torch.functional.unique : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.histc : {f64, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.kthvalue : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.logcumsumexp : {f64, bf16, f32, c64, c128},
torch.median : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.mode : {f64, i32, i64, f16, u8, i16, bf16, b8, i8, f32},
torch.multinomial : {f64, bf16, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f64, bf16, f32},
torch.nn.functional.max_pool3d : {f64, f32},
torch.nn.functional.max_pool3d_with_indices : {f64, f32},
torch.nn.functional.max_unpool1d : {f64, f32},
torch.nn.functional.max_unpool2d : {f64, f32},
torch.nn.functional.max_unpool3d : {f64, f32},
torch.nn.functional.multi_margin_loss : {f64, f32},
torch.nn.functional.multilabel_margin_loss : {f64, f32},
torch.nn.functional.one_hot : {i64},
torch.nn.functional.pdist : {f64, f32},
torch.polar : {f64, f32},
torch._segment_reduce : {f64, f16, bf16, f32},
torch.searchsorted : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.cholesky : {f64, f32, c128, c64},
torch.cholesky_inverse : {f64, f32, c128, c64},
torch.cholesky_solve : {f64, f32, c128, c64},
torch.linalg.eig : {f64, f32, c128, c64},
torch.linalg.eigvals : {f64, f32, c128, c64},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_only_outplace = {
torch.nn.functional.rrelu : {f64, bf16, f32},
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.functional.tensordot : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.inner : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.lu_solve : {c128, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_power : {c128, c64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.aminmax : {i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummax : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummin : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cdist : {f64, f32},
torch.nanmean : {bf16, f64, f32, f16},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.interpolate : {bf16, f64, f32, u8},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.pinv : {f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vander: {c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f64, f32},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
torch.native_batch_norm: {bf16},
torch._native_batch_norm_legit: {bf16},
torch.native_layer_norm: {bf16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
torch.histc: {i16, i32, i64, i8}, # aten::histc, aten::histc.out
torch.kthvalue: {f16}, # aten::kthvalue.values
torch.linalg.householder_product: {f32, f64}, # aten::linalg_householder_product, aten::linalg_householder_product.out
torch.linalg.solve_triangular: {f32, f64}, # aten::linalg_solve_triangular, aten::linalg_solve_triangular.out
torch.logcumsumexp: {bf16, f16}, # aten::_logcumsumexp, aten::_logcumsumexp.out
torch.matrix_exp: {f16}, # aten::linalg_matrix_exp
torch.median: {f16}, # aten::median, aten::median.dim_values
torch.multinomial: {f16}, # aten::multinomial, aten::multinomial.out
torch.nn.functional.gaussian_nll_loss: {f16}, # aten::_local_scalar_dense
torch.nn.functional.max_pool3d: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_pool3d_with_indices: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_unpool1d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool2d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool3d: {f16}, # aten::max_unpool3d
torch.nn.functional.multi_margin_loss: {bf16, f16}, # aten::multi_margin_loss
torch.nn.functional.multilabel_margin_loss: {bf16, f16}, # aten::multilabel_margin_loss_forward
torch.ormqr: {f32, f64}, # aten::ormqr, aten::ormqr.out
}
meta_function_device_expected_failures_only_outplace['cuda'] = {
torch.nn.functional.rrelu: {f16}, # aten::rrelu_with_noise
}
meta_function_device_skips['cpu'] = {
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.cummax: {f16},
torch.cummin: {f16},
torch.functional.tensordot: {f16},
torch.inner: {f16},
torch.linalg.matrix_power: {f32, f64},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f16},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.cholesky.default : {c64, c128, f64, f32},
aten.cholesky.out : {c64, c128, f64, f32},
aten.cholesky_inverse.default : {c64, c128, f64, f32},
aten.cholesky_inverse.out : {c64, c128, f64, f32},
aten.cholesky_solve.default : {c64, c128, f64, f32},
aten.cholesky_solve.out : {c64, c128, f64, f32},
aten.count_nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.count_nonzero.dim_IntList : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_eig.default : {c64, c128, f64, f32},
aten.linalg_householder_product.default : {c64, c128, f64, f32},
aten.linalg_householder_product.out : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.linalg_matrix_exp.default : {c64, bf16, f32, f64, c128},
aten.linalg_solve_triangular.default : {c64, c128, f64, f32},
aten.linalg_solve_triangular.out : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.ormqr.default : {c64, c128, f64, f32},
aten.ormqr.out : {c64, c128, f64, f32},
aten.polar.out : {f32, f64},
aten.take.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.take.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.tensordot.out : {c64, i8, f64, c128, i64, bf16, f32, i32, i16, u8},
aten.to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.default : {f32, f64}, # Shape of second output depends on data.
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._pdist_forward.default : {f32, f64},
aten._unique2.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.frexp.Tensor : {bf16, f32, f16, f64},
aten.grid_sampler_3d.default : {f32, f64},
aten.histc.default : {bf16, f32, f64},
aten.histc.out : {bf16, f32, f64},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.logcumsumexp.default : {bf16, f32, f64, c64, c128},
aten.logcumsumexp.out : {bf16, f32, f64, c64, c128},
aten.max_pool3d_with_indices.default : {f32, f64},
aten.max_unpool2d.default : {f32, f64},
aten.max_unpool3d.default : {f32, f64},
aten.median.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.median.dim : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.mode.default : {f16, i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.multi_margin_loss.default : {f32, f64},
aten.multilabel_margin_loss_forward.default : {f32, f64},
aten.multinomial.default : {bf16, f32, f64},
aten.multinomial.out : {bf16, f32, f64},
aten.nll_loss2d_forward.default : {bf16, f32, f64},
aten.polar.default : {f32, f64},
aten.rrelu_with_noise.default : {bf16, f32, f64},
aten.searchsorted.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.searchsorted.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.segment_reduce.default : {bf16, f32, f16, f64},
aten.unique_consecutive.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.unique_dim.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.aminmax.default: {i64, u8, b8, f32, i8, f64, i16, i32},
aten.cummax.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.cummin.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.linalg_lu_solve.default: {c32, c64, c128},
aten.linalg_lu_solve.out: {c32, c64, c128},
aten.linalg_pinv.atol_rtol_tensor: {f32, f64},
aten.linalg_pinv.atol_rtol_tensor_out: {f32, f64},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
aten.native_batch_norm.default: {bf16},
aten._native_batch_norm_legit.default: {bf16},
aten._native_batch_norm_legit.no_stats: {bf16},
aten.native_layer_norm.default: {bf16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.grid_sampler_3d.default: {f16}, # aten::grid_sampler_3d
aten.histc.default: {i16, i32, i64, i8}, # aten::histc
aten.histc.out: {i16, i32, i64, i8}, # aten::histc.out
aten.kthvalue.default: {f16}, # aten::kthvalue.values
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.linalg_householder_product.default: {f32, f64}, # aten::linalg_householder_product
aten.linalg_householder_product.out: {f32, f64}, # aten::linalg_householder_product.out
aten.linalg_matrix_exp.default: {f16}, # aten::linalg_matrix_exp
aten.linalg_solve_triangular.default: {f32, f64}, # aten::linalg_solve_triangular
aten.linalg_solve_triangular.out: {f32, f64}, # aten::linalg_solve_triangular.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.logcumsumexp.default: {bf16, f16}, # aten::_logcumsumexp
aten.logcumsumexp.out: {bf16, f16}, # aten::_logcumsumexp.out
aten.max_pool3d_with_indices.default: {bf16, f16}, # aten::max_pool3d_with_indices
aten.max_unpool2d.default: {f16}, # aten::max_unpool2d
aten.max_unpool3d.default: {f16}, # aten::max_unpool3d
aten.median.default: {f16}, # aten::median
aten.median.dim: {f16}, # aten::median.dim_values
aten.multi_margin_loss.default: {bf16, f16}, # aten::multi_margin_loss
aten.multilabel_margin_loss_forward.default: {bf16, f16}, # aten::multilabel_margin_loss_forward
aten.multinomial.default: {f16}, # aten::multinomial
aten.multinomial.out: {f16}, # aten::multinomial.out
aten.nll_loss2d_forward.default: {f16}, # aten::nll_loss2d_forward
aten.ormqr.default: {f32, f64}, # aten::ormqr
aten.ormqr.out: {f32, f64}, # aten::ormqr.out
aten.rrelu_with_noise.default: {f16}, # aten::rrelu_with_noise
aten.tensordot.out: {f16}, # aten::tensordot.out
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.cummax.default: {f16},
aten.cummin.default: {f16},
# ROCm stuff; technically this should be an expected failure, but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# the following ops fail if include_storage_offset = True, but these are a bit edge-casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
    # dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
    aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
test_meta_inplace
|
def test_meta_inplace(self, device, dtype, op):
func = op.get_inplace()
if not func:
self.skipTest("No inplace variable for this op")
if func in meta_inplace_skips:
self.skipTest("Skipped")
func = self._get_safe_inplace(func)
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
if sample_input.broadcasts_input:
continue
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
with MetaCrossRefFunctionMode(self, dtype=dtype, device=device, inplace=True):
expected = func(*args, **kwargs)
|
def test_meta_inplace(self, device, dtype, op):
func = op.get_inplace()
if not func:
self.skipTest("No inplace variable for this op")
if op.promotes_int_to_float and not dtype.is_floating_point:
self.skipTest("Op promotes to float, which is impossible for inplace with non-float input")
if func in meta_inplace_skips:
self.skipTest("Skipped")
func = self._get_safe_inplace(func)
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
if sample_input.broadcasts_input:
continue
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
with MetaCrossRefFunctionMode(self, dtype=dtype, device=device, inplace=True):
expected = func(*args, **kwargs)
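# Note (illustrative annotation, not from test_meta.py): the only change in the diff above is the
# new `op.promotes_int_to_float` skip. An inplace variant must write its result back into the
# input tensor, so an op that promotes integer inputs to a floating dtype cannot run inplace on a
# non-float input. A minimal standalone sketch of the failure mode being skipped follows; it is
# a sketch only, and `float_power_` is just one example of an op whose result is always floating point.
def _sketch_inplace_promotion_failure():
    import torch
    t = torch.arange(4, dtype=torch.int64)
    try:
        # float_power always produces float64 (or complex128), but t is int64,
        # so the inplace variant cannot store the result and raises.
        t.float_power_(2)
    except RuntimeError as err:
        return f"inplace promotion rejected: {err}"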
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
    # the following ops fail if include_storage_offset = True, but these are a bit of an edge case
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.count_nonzero : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.linalg.householder_product : {f64, c64, c128, f32},
torch.linalg.solve_triangular : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.matrix_exp : {f64, c128, c64, bf16, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.ormqr : {f64, c64, c128, f32},
torch.repeat_interleave : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.take : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.frexp : {f64, f16, bf16, f32},
torch.functional.unique : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.histc : {f64, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.kthvalue : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.logcumsumexp : {f64, bf16, f32, c64, c128},
torch.median : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.mode : {f64, i32, i64, f16, u8, i16, bf16, b8, i8, f32},
torch.multinomial : {f64, bf16, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f64, bf16, f32},
torch.nn.functional.max_pool3d : {f64, f32},
torch.nn.functional.max_pool3d_with_indices : {f64, f32},
torch.nn.functional.max_unpool1d : {f64, f32},
torch.nn.functional.max_unpool2d : {f64, f32},
torch.nn.functional.max_unpool3d : {f64, f32},
torch.nn.functional.multi_margin_loss : {f64, f32},
torch.nn.functional.multilabel_margin_loss : {f64, f32},
torch.nn.functional.one_hot : {i64},
torch.nn.functional.pdist : {f64, f32},
torch.polar : {f64, f32},
torch._segment_reduce : {f64, f16, bf16, f32},
torch.searchsorted : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.cholesky : {f64, f32, c128, c64},
torch.cholesky_inverse : {f64, f32, c128, c64},
torch.cholesky_solve : {f64, f32, c128, c64},
torch.linalg.eig : {f64, f32, c128, c64},
torch.linalg.eigvals : {f64, f32, c128, c64},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_only_outplace = {
torch.nn.functional.rrelu : {f64, bf16, f32},
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.functional.tensordot : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.inner : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.lu_solve : {c128, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_power : {c128, c64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.aminmax : {i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummax : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummin : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cdist : {f64, f32},
torch.nanmean : {bf16, f64, f32, f16},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.interpolate : {bf16, f64, f32, u8},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.pinv : {f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vander: {c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f64, f32},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
torch.native_batch_norm: {bf16},
torch._native_batch_norm_legit: {bf16},
torch.native_layer_norm: {bf16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
torch.histc: {i16, i32, i64, i8}, # aten::histc, aten::histc.out
torch.kthvalue: {f16}, # aten::kthvalue.values
torch.linalg.householder_product: {f32, f64}, # aten::linalg_householder_product, aten::linalg_householder_product.out
torch.linalg.solve_triangular: {f32, f64}, # aten::linalg_solve_triangular, aten::linalg_solve_triangular.out
torch.logcumsumexp: {bf16, f16}, # aten::_logcumsumexp, aten::_logcumsumexp.out
torch.matrix_exp: {f16}, # aten::linalg_matrix_exp
torch.median: {f16}, # aten::median, aten::median.dim_values
torch.multinomial: {f16}, # aten::multinomial, aten::multinomial.out
torch.nn.functional.gaussian_nll_loss: {f16}, # aten::_local_scalar_dense
torch.nn.functional.max_pool3d: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_pool3d_with_indices: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_unpool1d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool2d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool3d: {f16}, # aten::max_unpool3d
torch.nn.functional.multi_margin_loss: {bf16, f16}, # aten::multi_margin_loss
torch.nn.functional.multilabel_margin_loss: {bf16, f16}, # aten::multilabel_margin_loss_forward
torch.ormqr: {f32, f64}, # aten::ormqr, aten::ormqr.out
}
meta_function_device_expected_failures_only_outplace['cuda'] = {
torch.nn.functional.rrelu: {f16}, # aten::rrelu_with_noise
}
meta_function_device_skips['cpu'] = {
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.cummax: {f16},
torch.cummin: {f16},
torch.functional.tensordot: {f16},
torch.inner: {f16},
torch.linalg.matrix_power: {f32, f64},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f16},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.cholesky.default : {c64, c128, f64, f32},
aten.cholesky.out : {c64, c128, f64, f32},
aten.cholesky_inverse.default : {c64, c128, f64, f32},
aten.cholesky_inverse.out : {c64, c128, f64, f32},
aten.cholesky_solve.default : {c64, c128, f64, f32},
aten.cholesky_solve.out : {c64, c128, f64, f32},
aten.count_nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.count_nonzero.dim_IntList : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_eig.default : {c64, c128, f64, f32},
aten.linalg_householder_product.default : {c64, c128, f64, f32},
aten.linalg_householder_product.out : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.linalg_matrix_exp.default : {c64, bf16, f32, f64, c128},
aten.linalg_solve_triangular.default : {c64, c128, f64, f32},
aten.linalg_solve_triangular.out : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.ormqr.default : {c64, c128, f64, f32},
aten.ormqr.out : {c64, c128, f64, f32},
aten.polar.out : {f32, f64},
aten.take.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.take.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.tensordot.out : {c64, i8, f64, c128, i64, bf16, f32, i32, i16, u8},
aten.to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.default : {f32, f64}, # Shape of second output depends on data.
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._pdist_forward.default : {f32, f64},
aten._unique2.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.frexp.Tensor : {bf16, f32, f16, f64},
aten.grid_sampler_3d.default : {f32, f64},
aten.histc.default : {bf16, f32, f64},
aten.histc.out : {bf16, f32, f64},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.logcumsumexp.default : {bf16, f32, f64, c64, c128},
aten.logcumsumexp.out : {bf16, f32, f64, c64, c128},
aten.max_pool3d_with_indices.default : {f32, f64},
aten.max_unpool2d.default : {f32, f64},
aten.max_unpool3d.default : {f32, f64},
aten.median.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.median.dim : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.mode.default : {f16, i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.multi_margin_loss.default : {f32, f64},
aten.multilabel_margin_loss_forward.default : {f32, f64},
aten.multinomial.default : {bf16, f32, f64},
aten.multinomial.out : {bf16, f32, f64},
aten.nll_loss2d_forward.default : {bf16, f32, f64},
aten.polar.default : {f32, f64},
aten.rrelu_with_noise.default : {bf16, f32, f64},
aten.searchsorted.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.searchsorted.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.segment_reduce.default : {bf16, f32, f16, f64},
aten.unique_consecutive.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.unique_dim.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.aminmax.default: {i64, u8, b8, f32, i8, f64, i16, i32},
aten.cummax.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.cummin.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.linalg_lu_solve.default: {c32, c64, c128},
aten.linalg_lu_solve.out: {c32, c64, c128},
aten.linalg_pinv.atol_rtol_tensor: {f32, f64},
aten.linalg_pinv.atol_rtol_tensor_out: {f32, f64},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
aten.native_batch_norm.default: {bf16},
aten._native_batch_norm_legit.default: {bf16},
aten._native_batch_norm_legit.no_stats: {bf16},
aten.native_layer_norm.default: {bf16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.grid_sampler_3d.default: {f16}, # aten::grid_sampler_3d
aten.histc.default: {i16, i32, i64, i8}, # aten::histc
aten.histc.out: {i16, i32, i64, i8}, # aten::histc.out
aten.kthvalue.default: {f16}, # aten::kthvalue.values
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.linalg_householder_product.default: {f32, f64}, # aten::linalg_householder_product
aten.linalg_householder_product.out: {f32, f64}, # aten::linalg_householder_product.out
aten.linalg_matrix_exp.default: {f16}, # aten::linalg_matrix_exp
aten.linalg_solve_triangular.default: {f32, f64}, # aten::linalg_solve_triangular
aten.linalg_solve_triangular.out: {f32, f64}, # aten::linalg_solve_triangular.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.logcumsumexp.default: {bf16, f16}, # aten::_logcumsumexp
aten.logcumsumexp.out: {bf16, f16}, # aten::_logcumsumexp.out
aten.max_pool3d_with_indices.default: {bf16, f16}, # aten::max_pool3d_with_indices
aten.max_unpool2d.default: {f16}, # aten::max_unpool2d
aten.max_unpool3d.default: {f16}, # aten::max_unpool3d
aten.median.default: {f16}, # aten::median
aten.median.dim: {f16}, # aten::median.dim_values
aten.multi_margin_loss.default: {bf16, f16}, # aten::multi_margin_loss
aten.multilabel_margin_loss_forward.default: {bf16, f16}, # aten::multilabel_margin_loss_forward
aten.multinomial.default: {f16}, # aten::multinomial
aten.multinomial.out: {f16}, # aten::multinomial.out
aten.nll_loss2d_forward.default: {f16}, # aten::nll_loss2d_forward
aten.ormqr.default: {f32, f64}, # aten::ormqr
aten.ormqr.out: {f32, f64}, # aten::ormqr.out
aten.rrelu_with_noise.default: {f16}, # aten::rrelu_with_noise
aten.tensordot.out: {f16}, # aten::tensordot.out
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
    aten.softmax.int : {c32, c64},
aten.cummax.default: {f16},
aten.cummin.default: {f16},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
class TestMeta(TestCase):
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
    # the following ops fail if include_storage_offset = True, but these are a bit of an edge case
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
    # dtype this will fail. CPU execution may also have
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
    aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
assertEqualShapes
|
def assertEqualShapes(res, exp):
    self.assertIsNone(res) if exp is None else self.assertEqual(exp, res.shape)
assertEqualShapes(grads[0], expected_shapes[0])
assertEqualShapes(grads[1], expected_shapes[1])
assertEqualShapes(grads[2], expected_shapes[2])
out_kwargs = {
f"out{i}": torch.empty(0, device=device, dtype=dtype)
for i in range(len(output_mask))
}
# test call with out parameters
grads = op(*args, output_mask, **out_kwargs)
def assertEqualShapes(res, exp):
    self.assertEqual(exp, res.shape) if exp is not None else True
assertEqualShapes(out_kwargs["out0"], expected_shapes[0])
assertEqualShapes(out_kwargs["out1"], expected_shapes[1])
assertEqualShapes(out_kwargs["out2"], expected_shapes[2])
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
    # the following ops fail if include_storage_offset = True, but these are a bit of an edge case
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have a
# different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_meta.py
|
assertEqualShapes
|
def assertEqualShapes(res, exp):
self.assertIsNone(res) if exp is None else self.assertEqual(exp, res.shape)
assertEqualShapes(grads[0], expected_shapes[0])
assertEqualShapes(grads[1], expected_shapes[1])
assertEqualShapes(grads[2], expected_shapes[2])
out_kwargs = {
f"out{i}": torch.empty(0, device=device, dtype=dtype)
for i in range(len(output_mask))
}
# test call with out parameters
grads = op(*args, output_mask, **out_kwargs)
def assertEqualShapes(res, exp):
self.assertEqual(exp, res.shape) if exp is not None else True
assertEqualShapes(out_kwargs["out0"], expected_shapes[0])
assertEqualShapes(out_kwargs["out1"], expected_shapes[1])
assertEqualShapes(out_kwargs["out2"], expected_shapes[2])
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fails if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
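# Illustrative sketch (added; not part of the original file): the regex above pulls the
# operator name out of a NotImplementedError message, e.g.
#   msg = "Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend."
#   RE_NOT_IMPLEMENTED_MSG.search(msg).group(1)  # -> 'aten::_local_scalar_dense'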
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
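# Illustrative note (added; how the harness consumes this table is assumed): an entry here is
# meant to count as an expected failure only when its predicate returns True for the sample.
# For torch.repeat_interleave the check is "repeats is not a plain int", e.g.
#   pred = meta_function_expected_failures_conditional[torch.repeat_interleave]
#   pred(f32, repeats=torch.tensor([1, 2]))  # True  (tensor repeats -> data-dependent shape)
#   pred(f32, repeats=3)                     # False (static repeat count)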
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have a
# different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
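# Minimal sketch (added for illustration; the helper below is an assumption, not the suite's
# actual dispatch logic): the dtype-keyed tables above drive which op/dtype pairs the meta
# tests treat as expected failures or skips, and could be consulted like this:
def expected_to_fail_on_meta(func, dtype, device_type):
    # Union of the device-independent and per-device expected-failure tables above.
    failures = set(meta_function_expected_failures.get(func, set()))
    failures |= meta_function_device_expected_failures[device_type].get(func, set())
    return dtype in failures
# e.g. expected_to_fail_on_meta(torch.geqrf, f32, 'cuda') -> True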
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_meta.py
|
test_batch_norm_backward
|
def test_batch_norm_backward(self, output_mask):
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
# input, (args) num_groups, (kwargs) weight, bias eps
device = "meta"
dtype = torch.float32
samples = sample_inputs_batch_norm(None, device, dtype, requires_grad=False)
for sample in samples:
with self.subTest(sample=sample):
if sample.input.dim() < 2:
continue
grad_out = torch.ones_like(sample.input)
running_mean, running_var, weight, bias = sample.args
train = sample.kwargs.get("training", True)
save_mean = torch.zeros((sample.input.shape[1], ), device=device, dtype=dtype) if train else None
save_invstd = torch.zeros((sample.input.shape[1], ), device=device, dtype=dtype) if train else None
args = [grad_out, sample.input, weight, running_mean, running_var,
save_mean, save_invstd, train, sample.kwargs.get("eps", 1e-5)]
expected_shapes = (
sample.input.shape,
torch.Size([sample.input.shape[1]]) if output_mask[1] else None,
torch.Size([sample.input.shape[1]]) if output_mask[2] else None)
self._norm_backwards_test_helper(torch.ops.aten.native_batch_norm_backward,
args, output_mask, expected_shapes)
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fails if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have a
# different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_metal.py
|
test_conv
|
def test_conv(self):
# Conv params
batch_size = 2
input_channels_per_group = 6
height = 16
width = 16
output_channels_per_group = 6
groups = 4
kernel_h = kernel_w = 3
stride_h = stride_w = 1
pad_h = pad_w = 1
dilation = 1
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
paddings = (pad_h, pad_w)
dilations = (dilation, dilation)
conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
conv_bias_shape = (output_channels)
class Conv2D(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
return F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"metal_prepack::conv2d_prepack": 1,
"metal_prepack::conv2d_run": 1}
TestMetalRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)
class Conv2DRelu(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.relu(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"metal_prepack::conv2d_prepack": 1,
"metal_prepack::conv2d_run": 1}
TestMetalRewritePass.validate_transformed_module(
Conv2DRelu(), pattern_count_map, data_shape)
pattern_count_map["aten::relu"] = 1
pattern_count_map["metal_prepack::conv2d_prepack"] = -1
TestMetalRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::relu"] = -1
TestMetalRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
class Conv2DHardtanh(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.hardtanh(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"metal_prepack::conv2d_prepack": 1,
"metal_prepack::conv2d_run": 1}
TestMetalRewritePass.validate_transformed_module(Conv2DHardtanh(), pattern_count_map, data_shape)
pattern_count_map["aten::hardtanh"] = 1
pattern_count_map["metal_prepack::conv2d_prepack"] = -1
TestMetalRewritePass.validate_transformed_module(
Conv2DHardtanh(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::hardtanh"] = -1
TestMetalRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
|
def test_conv(self):
# Conv params
batch_size = 2
input_channels_per_group = 6
height = 16
width = 16
output_channels_per_group = 6
groups = 4
kernel_h = kernel_w = 3
stride_h = stride_w = 1
pad_h = pad_w = 1
dilation = 1
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
paddings = (pad_h, pad_w)
dilations = (dilation, dilation)
conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
conv_bias_shape = (output_channels)
class Conv2D(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
return F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"metal_prepack::conv2d_prepack": 1,
"metal_prepack::conv2d_run": 1}
TestMetalRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)
class Conv2DRelu(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.relu(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"metal_prepack::conv2d_prepack": 1,
"metal_prepack::conv2d_run": 1}
TestMetalRewritePass.validate_transformed_module(
Conv2DRelu(), pattern_count_map, data_shape)
pattern_count_map["aten::relu"] = 1
pattern_count_map["metal_prepack::conv2d_prepack"] = -1
TestMetalRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::relu"] = -1
TestMetalRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
class Conv2DHardtanh(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.hardtanh(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"metal_prepack::conv2d_prepack": 1,
"metal_prepack::conv2d_run": 1}
TestMetalRewritePass.validate_transformed_module(Conv2DHardtanh(), pattern_count_map, data_shape)
pattern_count_map["aten::hardtanh"] = 1
pattern_count_map["metal_prepack::conv2d_prepack"] = -1
TestMetalRewritePass.validate_transformed_module(
Conv2DHardtanh(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::hardtanh"] = -1
TestMetalRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
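# Note (added for clarity; semantics inferred from usage, not stated in this file):
# pattern_count_map appears to map IR substrings to expected occurrence counts in the
# rewritten graph, with -1 meaning the pattern must not appear at all -- e.g. after the
# rewrite pass, "Tensor = aten::conv2d" should be gone while exactly one
# metal_prepack::conv2d_run call should remain.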
|
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class TestMetalRewritePass(TestCase):
|
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class TestMetalRewritePass(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_metal.py
|
__init__
|
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_metal.py
|
__init__
|
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_metal.py
|
__init__
|
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
test_conversion
|
def test_conversion(self):
for cpu_tensor in [torch.randn((1, 2, 3, 4),
dtype=torch.float, device=torch.device('cpu')),
torch.randn((1, 2, 3, 4, 5),
dtype=torch.float, device=torch.device('cpu'))[:, :, :, :, 1]]:
cpu_tensor.requires_grad_()
# float cpu tensor to mkldnn float tensor or bfloat tensor.
for dtype1 in types:
mkldnn_tensor = cpu_tensor.to_mkldnn(dtype1)
self.assertEqual(mkldnn_tensor.dtype, dtype1)
cpu_tensor_1 = mkldnn_tensor.to_dense()
# not given dtype for to_dense, mkldnn tensor has same dtype with cpu tensor
self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
# mkldnn float/bfloat tensor to cpu float or bfloat tensor
for dtype2 in types:
cpu_tensor_2 = mkldnn_tensor.to_dense(dtype2)
self.assertEqual(cpu_tensor_2.dtype, dtype2)
atol = 1e-5 if dtype1 == torch.float and dtype2 == torch.float else 1e-2
self.assertEqual(cpu_tensor, cpu_tensor_2.float(), atol=atol, rtol=0)
self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
self.assertEqual(mkldnn_tensor.size(), torch.Size([1, 2, 3, 4]))
self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
if dtype1 == torch.float:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size())
else:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size() / 2)
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
# bfloat cpu tensor to mkldnn float tensor or bfloat tensor.
cpu_tensor_bf16 = cpu_tensor.bfloat16()
for dtype1 in types:
mkldnn_tensor = cpu_tensor_bf16.to_mkldnn(dtype1)
self.assertEqual(mkldnn_tensor.dtype, dtype1)
cpu_tensor_1 = mkldnn_tensor.to_dense()
# not given dtype for to_dense, mkldnn tensor has same dtype with cpu tensor
self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
# mkldnn float/bfloat tensor to cpu float or bfloat tensor
for dtype2 in types:
cpu_tensor_2 = mkldnn_tensor.to_dense(dtype2)
self.assertEqual(cpu_tensor_2.dtype, dtype2)
self.assertEqual(cpu_tensor_bf16, cpu_tensor_2.bfloat16(), atol=1e-5, rtol=0)
self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
self.assertEqual(mkldnn_tensor.size(), torch.Size([1, 2, 3, 4]))
self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
if dtype1 == torch.bfloat16:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_bf16.element_size())
else:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_bf16.element_size() * 2)
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
|
def test_conversion(self):
for cpu_tensor in [torch.randn((1, 2, 3, 4),
dtype=torch.float, device=torch.device('cpu')),
torch.randn((1, 2, 3, 4, 5),
dtype=torch.float, device=torch.device('cpu'))[:, :, :, :, 1]]:
cpu_tensor.requires_grad_()
convert_dtypes = {torch.half: [torch.half, torch.float],
torch.bfloat16: [torch.bfloat16, torch.float],
torch.float: [torch.bfloat16, torch.half]}
# float/bfloat16/half cpu tensor to mkldnn tensor.
for dtype1 in types:
mkldnn_tensor = cpu_tensor.to_mkldnn(dtype1)
self.assertEqual(mkldnn_tensor.dtype, dtype1)
cpu_tensor_1 = mkldnn_tensor.to_dense()
# not given dtype for to_dense, mkldnn tensor has same dtype with cpu tensor
self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
# mkldnn float/bfloat16/half tensor to a cpu tensor of a compatible dtype
for dtype2 in convert_dtypes[dtype1]:
cpu_tensor_2 = mkldnn_tensor.to_dense(dtype2)
self.assertEqual(cpu_tensor_2.dtype, dtype2)
atol = 1e-5 if dtype1 == torch.float and dtype2 == torch.float else 1e-2
self.assertEqual(cpu_tensor, cpu_tensor_2.float(), atol=atol, rtol=0)
self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
self.assertEqual(mkldnn_tensor.size(), torch.Size([1, 2, 3, 4]))
self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
if dtype1 == torch.float:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size())
else:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size() / 2)
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
# half/bfloat16 cpu tensor to mkldnn float/half/bfloat16 tensor.
for orig_dtype in [torch.half, torch.bfloat16]:
cpu_tensor_lower = cpu_tensor.to(dtype=orig_dtype)
for dtype1 in convert_dtypes[orig_dtype]:
mkldnn_tensor = cpu_tensor_lower.to_mkldnn(dtype1)
self.assertEqual(mkldnn_tensor.dtype, dtype1)
cpu_tensor_1 = mkldnn_tensor.to_dense()
# not given dtype for to_dense, mkldnn tensor has same dtype with cpu tensor
self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
# mkldnn float/bfloat/half tensor to cpu float/bfloat/half tensor
for dtype2 in convert_dtypes[cpu_tensor_lower.dtype]:
cpu_tensor_2 = mkldnn_tensor.to_dense(dtype2)
self.assertEqual(cpu_tensor_2.dtype, dtype2)
self.assertEqual(cpu_tensor_lower,
cpu_tensor_2.to(dtype=cpu_tensor_lower.dtype), atol=1e-5, rtol=0)
self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
self.assertEqual(mkldnn_tensor.size(), torch.Size([1, 2, 3, 4]))
self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
if dtype1 in [torch.bfloat16, torch.half]:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_lower.element_size())
else:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_lower.element_size() * 2)
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
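# Minimal usage sketch (added for illustration; mirrors the conversions exercised above):
#   t = torch.randn(1, 2, 3, 4)
#   m = t.to_mkldnn(torch.bfloat16)   # dense float CPU tensor -> bfloat16 MKL-DNN tensor
#   back = m.to_dense(torch.float)    # back to a dense float CPU tensor
#   torch.testing.assert_close(t, back, atol=1e-2, rtol=0)  # bf16 round-trip loses precision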
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
test_conversion_byte_char
|
cpu_tensor_1 = mkldnn_tensor.to_dense()
# not given dtype for to_dense, mkldnn tensor has same dtype with cpu tensor
self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
# mkldnn float/bfloat tensor to cpu float or bfloat tensor
for dtype2 in types:
cpu_tensor_2 = mkldnn_tensor.to_dense(dtype2)
self.assertEqual(cpu_tensor_2.dtype, dtype2)
self.assertEqual(cpu_tensor_bf16, cpu_tensor_2.bfloat16(), atol=1e-5, rtol=0)
self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
self.assertEqual(mkldnn_tensor.size(), torch.Size([1, 2, 3, 4]))
self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
if dtype1 == torch.bfloat16:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_bf16.element_size())
else:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_bf16.element_size() * 2)
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
|
def test_conversion_byte_char(self):
int8_types = [torch.int8, torch.uint8]
for int8_type in int8_types:
low = -100 if int8_type is torch.int8 else 0
high = 100
for cpu_tensor in [torch.randint(
low=low,
high=high,
size=(1, 2, 3, 4),
dtype=torch.int64,
device=torch.device('cpu')),
torch.randint(
low=low,
high=high,
size=(1, 2, 3, 4, 5),
dtype=torch.int64,
device=torch.device('cpu'))[:, :, :, :, :]]:
cpu_tensor = cpu_tensor.to(dtype=int8_type)
mkldnn_tensor = cpu_tensor.to_mkldnn(int8_type)
self.assertEqual(mkldnn_tensor.dtype, int8_type)
cpu_tensor_1 = mkldnn_tensor.to_dense()
self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
self.assertEqual(cpu_tensor, cpu_tensor_1)
self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
self.assertEqual(mkldnn_tensor.size(), cpu_tensor.size())
self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size())
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_mkldnn.py
|
test_unsupported
|
def test_unsupported(self):
# unsupported types and unsupported types with gpu
for dtype in [torch.double, torch.half, torch.uint8, torch.int8,
torch.short, torch.int, torch.long]:
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cpu')).to_mkldnn()
if torch.cuda.is_available():
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cuda')).to_mkldnn()
# supported type with gpu
if torch.cuda.is_available():
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=torch.float, device=torch.device('cuda')).to_mkldnn()
# some factory functions
for creator in [torch.ones, torch.randn, torch.rand]:
with self.assertRaises(RuntimeError) as context:
creator(1, 2, 3, 4, dtype=torch.float, device=torch.device('cpu'), layout=torch._mkldnn)
|
def test_unsupported(self):
# unsupported types and unsupported types with gpu
for dtype in [torch.double, torch.uint8, torch.int8,
torch.short, torch.int, torch.long]:
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cpu')).to_mkldnn()
if torch.cuda.is_available():
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cuda')).to_mkldnn()
# supported type with gpu
if torch.cuda.is_available():
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=torch.float, device=torch.device('cuda')).to_mkldnn()
# some factory functions
for creator in [torch.ones, torch.randn, torch.rand]:
with self.assertRaises(RuntimeError) as context:
creator(1, 2, 3, 4, dtype=torch.float, device=torch.device('cpu'), layout=torch._mkldnn)
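# Illustration (added): an unsupported dtype is expected to raise on conversion, e.g.
#   torch.randn(2, 2, dtype=torch.double).to_mkldnn()   # -> RuntimeError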
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_meta.py
|
test_embedding_bag_dense_backward
|
def test_embedding_bag_dense_backward(self, mode):
weight = torch.randn(4, 3, requires_grad=True)
indices = torch.tensor([1, 0, 2, 1, 3])
offsets = torch.tensor([0, 2, 3, 5])
scale_grad_by_freq = False
sparse = False
per_sample_weights = None
include_last_offset = False
padding_idx = -1
output, offset2bag, bag_size, maximum_indices = torch.ops.aten._embedding_bag.default(
weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx
)
grad = torch.randn_like(output)
# Call the function with example inputs
grad_weight = torch.ops.aten._embedding_bag_dense_backward.default(
grad, indices, offset2bag, bag_size, maximum_indices, weight.size(0),
scale_grad_by_freq, mode, per_sample_weights, padding_idx
)
meta_grad_weight = torch.ops.aten._embedding_bag_dense_backward.default(
grad.to('meta'), indices.to('meta'), offset2bag.to('meta'), bag_size.to('meta'),
maximum_indices.to('meta'), weight.size(0),
scale_grad_by_freq, mode, per_sample_weights, padding_idx
)
self.assertEqual(grad_weight.to('meta'), meta_grad_weight)
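# Note (added for clarity): grad_weight is the gradient w.r.t. `weight`, so on both the dense
# and meta paths it is expected to have weight's shape, (4, 3) here; the final assertEqual
# checks that the meta kernel reproduces the metadata of the dense result.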
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
    # the following ops fail if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
    # dtype this will fail. CPU execution may also have
    # a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_meta.py
|
test_embedding_bag_byte_prepack
|
def test_embedding_bag_byte_prepack(self):
batch_size = 10
num_embeddings = 80
embedding_dim = [128, 256, 512]
res_shape = [[batch_size, num_embeddings, ed + 8] for ed in embedding_dim]
for ed, rs in zip(embedding_dim, res_shape):
weight = torch.randn(batch_size, num_embeddings, ed, dtype=torch.float32)
res = torch.ops.quantized.embedding_bag_byte_prepack(weight.to(device="meta"))
self.assertEqual(res.shape, rs)
self.assertEqual(res.dtype, torch.float32)
self.assertEqual(res.untyped_storage().data_ptr(), 0)
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
    # the following ops fail if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
    # dtype this will fail. CPU execution may also have
    # a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|