Dataset columns:
library: stringclasses (1 value)
test_file: stringclasses (785 values)
test_function: stringlengths (1 to 295)
before: stringlengths (0 to 448k)
after: stringlengths (0 to 487k)
context_before: stringclasses (947 values)
context_after: stringlengths (0 to 16.3k)
commit_before: stringclasses (1 value)
commit_after: stringclasses (1 value)
change_type: stringclasses (3 values)
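Each row below pairs one test function with its surrounding imports and the two commits it was diffed between. A minimal sketch of how rows with this schema could be loaded and filtered using the Hugging Face datasets library follows; the dataset id "owner/torch-test-diffs" is a hypothetical placeholder, since the actual repository id is not given here.

# A minimal sketch, assuming the rows below are published as a Hugging Face
# dataset with the columns listed above (library, test_file, test_function,
# before, after, context_before, context_after, commit_before, commit_after,
# change_type). "owner/torch-test-diffs" is a placeholder id, not the real one.
from datasets import load_dataset

ds = load_dataset("owner/torch-test-diffs", split="train")

# Keep only the tests that were newly added between the two commits.
added = ds.filter(lambda row: row["change_type"] == "added")

# Print the file, test name, and stored test body for a few rows.
for row in added.select(range(3)):
    print(row["test_file"], "::", row["test_function"])
    print(row["context_before"])
    print(row["before"])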
torch
test/torch_np/test_unary_ufuncs.py
test_arccos
def test_arccos(self): assert_allclose(np.arccos(0.5), arccos(0.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_arccosh
def test_arccosh(self): assert_allclose(np.arccosh(1.5), arccosh(1.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_arcsin
def test_arcsin(self): assert_allclose(np.arcsin(0.5), arcsin(0.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_arcsinh
def test_arcsinh(self): assert_allclose(np.arcsinh(0.5), arcsinh(0.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_arctan
def test_arctan(self): assert_allclose(np.arctan(0.5), arctan(0.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_arctanh
def test_arctanh(self): assert_allclose(np.arctanh(0.5), arctanh(0.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_cbrt
def test_cbrt(self): assert_allclose(np.cbrt(0.5), cbrt(0.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_ceil
def test_ceil(self): assert_allclose(np.ceil(0.5), ceil(0.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_conjugate
def test_conjugate(self): assert_allclose( np.conjugate(0.5), conjugate(0.5), atol=1e-14, check_dtype=False )
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_tanh
def test_tanh(self): assert_allclose(np.tanh(0.5), tanh(0.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/test_unary_ufuncs.py
test_trunc
def test_trunc(self): assert_allclose(np.trunc(0.5), trunc(0.5), atol=1e-14, check_dtype=False)
import numpy as np from torch._numpy._ufuncs import * # noqa: F403 from torch._numpy.testing import assert_allclose from torch.testing._internal.common_utils import run_tests, TestCase class TestUnaryUfuncs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
func
def func(*inputs): if use_bias: lx, lweight, lbias = inputs else: lx, lweight = inputs lbias = None out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups) return out if use_bias: inputs = x, weight, bias else: inputs = x, weight dummy_out = func(*inputs) grad_y = torch.randn_like( dummy_out, device=device, dtype=dtype, requires_grad=True ) if dtype == torch.float: (g,) = torch.autograd.grad(dummy_out.sum(), x, create_graph=True) return g.requires_grad return gradgradcheck(func, inputs, (grad_y,))
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
run_test
def run_test(benchmark): conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to(device, dtype) for size in sizes: x = torch.randn(size, device=device, dtype=dtype) out = conv(x.detach().clone().requires_grad_()) out.backward(torch.ones_like(out)) run_test(benchmark=False) run_test(benchmark=True)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv_double_backward
def test_conv_double_backward(self, device, dtype): with torch.backends.cudnn.flags(enabled=True, deterministic=True): batch_size = 1 for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, [1])]: for stride, padding, chan_in, chan_out, dilation in product( [1], [2], [2], [3], dilations ): no_weight = stride == 2 result = self.run_conv_double_back_test( kern, stride, padding, chan_in, chan_out, batch_size, inp_size, dilation, no_weight, use_xpu=True, dtype=dtype, ) self.assertTrue(result, "Conv double backward test failed")
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
_test
def _test(t, weight, mode): t_a = t.view(-1).cpu().numpy() w_a = weight.view(-1).cpu().numpy() expected = scipy.signal.convolve(t_a, w_a, mode=mode) kwargs = {"padding": mode} if mode == "same": p = weight.shape[2] // 2 t = torch.nn.functional.pad(t, (p, p)) kwargs.pop("padding") weight_flipped = torch.flip(weight, (2,)) actual = torch.nn.functional.conv1d(t, weight_flipped, **kwargs).squeeze(0) if mode == "same": actual = actual[:feat_dim] self.assertEqual(actual, expected, atol=2e-5, rtol=2e-5) with set_default_dtype(torch.float): _test(t, weight_even, mode) _test(t, weight_odd, mode)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv_double_backward_no_bias
def test_conv_double_backward_no_bias(self): kern, stride = 3, 2 chan_in, chan_out = 2, 4 batch_size, inp_size = 2, 5 padding, dilation = 1, 1 no_weight, use_bias = False, True result = self.run_conv_double_back_test( kern, stride, padding, chan_in, chan_out, batch_size, inp_size, dilation, no_weight, use_bias=use_bias, ) self.assertTrue(result, "Conv double backward test failed")
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv_double_backward_groups
def test_conv_double_backward_groups(self): kern, stride, padding = 3, 1, 2 chan_in, chan_out = 2, 4 batch_size, inp_size, dilation = 2, 6, 1 no_weight = False groups = 2 result = self.run_conv_double_back_test( kern, stride, padding, chan_in * groups, chan_out * groups, batch_size, inp_size, dilation, no_weight, groups=groups, ) self.assertTrue(result, "Conv double backward test failed")
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_Conv2d_size_1_kernel
def test_Conv2d_size_1_kernel(self, device): x_cpu = torch.randn(2, 3, 5, 5) conv_cpu = torch.nn.Conv2d(3, 3, kernel_size=1) y_cpu = conv_cpu(x_cpu) y = torch.rand_like(y_cpu) y_cpu.backward(y) with cudnn.flags(enabled=False): conv_cuda = torch.nn.Conv2d(3, 3, kernel_size=1).to(device) conv_cuda.bias.data.copy_(conv_cpu.bias.data) conv_cuda.weight.data.copy_(conv_cpu.weight.data) y_cuda = conv_cuda(x_cpu.to(device)) y_cuda.backward(y.to(device)) self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False) self.assertEqual( conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False, ) self.assertEqual( conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False, )
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_ConvTranspose2d_size_1_kernel
def test_ConvTranspose2d_size_1_kernel(self, device): x_cpu = torch.randn(2, 3, 5, 5) conv_cpu = torch.nn.ConvTranspose2d(3, 3, kernel_size=1) y_cpu = conv_cpu(x_cpu) y = torch.rand_like(y_cpu) y_cpu.backward(y) conv_cuda = torch.nn.ConvTranspose2d(3, 3, kernel_size=1).to(device) conv_cuda.bias.data.copy_(conv_cpu.bias.data) conv_cuda.weight.data.copy_(conv_cpu.weight.data) y_cuda = conv_cuda(x_cpu.to(device)) y_cuda.backward(y.to(device)) self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False) self.assertEqual( conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False, ) self.assertEqual( conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False, )
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
conv2d_depthwise
def conv2d_depthwise(x, weight): return torch.nn.functional.conv2d( x, weight, bias=None, stride=(1, 10), groups=2 ) torch.autograd.gradcheck(conv2d_depthwise, (x, weight))
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
helper
def helper(n, c, h, w, out_channels, kernel_size, groups): input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device).to( memory_format=torch.channels_last ) input.requires_grad_() conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups).to( device=device, dtype=dtype, memory_format=torch.channels_last ) for p in conv.parameters(): p.data = torch.randint_like(p, -3, 3) ref_input = input.detach().clone().contiguous().double().requires_grad_() ref_conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups) ref_conv.load_state_dict(conv.state_dict()) ref_conv = ref_conv.to( device=device, dtype=torch.double, memory_format=torch.contiguous_format ) out = conv(input) ref_out = ref_conv(ref_input) grad = torch.randint_like(out, -3, 3) ref_grad = grad.detach().clone().double().contiguous() out.backward(grad) ref_out.backward(ref_grad) self.assertTrue(out.is_contiguous(memory_format=torch.channels_last)) self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last)) self.assertTrue( conv.weight.grad.is_contiguous(memory_format=torch.channels_last) ) self.assertTrue(ref_out.is_contiguous()) self.assertTrue(ref_input.grad.is_contiguous()) self.assertTrue(ref_conv.weight.grad.is_contiguous()) self.assertEqual(out, ref_out, exact_dtype=False) self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False) self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False) self.assertEqual(input.grad, ref_input.grad, exact_dtype=False) helper(2, 8, 4, 4, out_channels=4, kernel_size=3, groups=1) helper(2, 8, 4, 4, out_channels=8, kernel_size=3, groups=8) helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=1) helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=16)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
helper
def helper(n, c, h, w, out_channels, kernel_size, groups): input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device).to( memory_format=torch.channels_last ) input.requires_grad_() conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups).to( device=device, dtype=dtype, memory_format=torch.channels_last ) for p in conv.parameters(): p.data = torch.randint_like(p, -3, 3) ref_input = input.detach().clone().contiguous().double().requires_grad_() ref_conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups) ref_conv.load_state_dict(conv.state_dict()) ref_conv = ref_conv.to( device=device, dtype=torch.double, memory_format=torch.contiguous_format ) out = conv(input) ref_out = ref_conv(ref_input) grad = torch.randint_like(out, -3, 3) ref_grad = grad.detach().clone().double().contiguous() out.backward(grad) ref_out.backward(ref_grad) self.assertTrue(out.is_contiguous(memory_format=torch.channels_last)) self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last)) self.assertTrue( conv.weight.grad.is_contiguous(memory_format=torch.channels_last) ) self.assertTrue(ref_out.is_contiguous()) self.assertTrue(ref_input.grad.is_contiguous()) self.assertTrue(ref_conv.weight.grad.is_contiguous()) self.assertEqual(out, ref_out, exact_dtype=False) self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False) self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False) self.assertEqual(input.grad, ref_input.grad, exact_dtype=False) helper(2, 8, 4, 4, out_channels=4, kernel_size=3, groups=1) helper(2, 8, 4, 4, out_channels=8, kernel_size=3, groups=8) helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=1) helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=16)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
test_large_bmm_backward
def test_large_bmm_backward(self, device): A = torch.randn([1024, 2, 1024], device=device).mT.contiguous().mT B = torch.randn([1, 1024, 65536], device=device, requires_grad=True) G = torch.randn([1024, 2, 65536], device=device) # Should not create an intermediary tensor of size [1024, 1024, 65536] (256GB of memory) and OOM (A @ B).backward(G)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
test_large_bmm_mm_backward
def test_large_bmm_mm_backward(self, device): A = torch.randn([1024, 2, 1024], device=device).mT.contiguous().mT B = torch.randn([1024, 65536], device=device, requires_grad=True) G = torch.randn([1024, 2, 65536], device=device) # Should not create an intermediary tensor of size [1024, 1024, 65536] (256GB of memory) and OOM (A @ B).backward(G)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
assertEqual
def assertEqual(answer, expected): if x.dtype.is_floating_point or x.dtype.is_complex: k = max(x.shape[-1], 1) # Scale the atol with the size of the matrix self.assertEqual( answer, expected, msg=f"{x.shape} x {y.shape} = {answer.shape}", atol=k * 5e-5, rtol=1e-4, ) else: self.assertEqual( answer, expected, msg=f"{x.shape} x {y.shape} = {answer.shape}" ) # test x @ y expected = np.matmul(x.cpu(), y.cpu()) ans = torch.matmul(x, y) self.assertTrue(ans.is_contiguous()) assertEqual(ans, expected) # test out out = torch.empty_like(ans) ans = torch.matmul(x, y, out=out) self.assertIs(ans, out) self.assertTrue(ans.is_contiguous()) assertEqual(ans, expected)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
test_matmul_out_kernel_errors_with_autograd
def test_matmul_out_kernel_errors_with_autograd(self, device, dtype): a = torch.empty( (256, 512), device=device, dtype=dtype, requires_grad=True ).unsqueeze(0) b = torch.empty( (4, 128, 512), device=device, dtype=dtype, requires_grad=True ).transpose(-1, -2) c = torch.empty((256, 4, 128), device=device, dtype=dtype).movedim(1, 0) torch.matmul(a.detach(), b.detach(), out=c) with self.assertRaisesRegex( RuntimeError, "functions with out=... arguments don't support automatic differentiation", ): torch.matmul(a, b, out=c) with torch.no_grad(): torch.matmul(a, b, out=c)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv2d_no_grad
def test_conv2d_no_grad(self, device, dtype): for batch in [1, 2, 3]: for groups in [1, 2, 4]: input = torch.rand(batch, groups, 8, 8, dtype=dtype, device=device) m = nn.Conv2d( groups, 8, kernel_size=(3, 3), groups=groups, dtype=dtype, device=device, ) with torch.no_grad(): output_ng = m(input) output = m(input) self.assertEqual(output, output_ng, rtol=1e-2, atol=1e-5)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_channels_last_ouput_stride
def test_channels_last_ouput_stride(self, device, dtype): input = torch.randn( (2, 3, 16, 16), device=device, dtype=dtype, requires_grad=True ) weight = torch.randn( (512, 3, 3, 3), device=device, dtype=dtype, requires_grad=True ) input = input.to(memory_format=torch.channels_last) weight = weight.to(memory_format=torch.channels_last) out = torch.conv2d(input, weight, None, (2, 2), (0, 0), (1, 1), 1) if dtype is torch.float64: # Like most conv backend, xpu does not support float64 for chanel last conv. # input NHWC, output NCHW assert_size_stride(out, (2, 512, 7, 7), (25088, 49, 7, 1)) else: # input NHWC, output NHWC assert_size_stride(out, (2, 512, 7, 7), (25088, 1, 3584, 512))
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
maybe_transpose
def maybe_transpose(cond, m): if not cond: return m return m.t().clone(memory_format=torch.contiguous_format).t() M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype)) m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype)) m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype)) self._test_addmm_addmv( func, M, m1, m2, transpose_out=t4, activation=activation ) if t1: # use vector V instead of matrix M for epilogue fusion in CUDA (doesn't depend on t1) self._test_addmm_addmv( func, V, m1, m2, beta=1, transpose_out=t4, activation=activation, )
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
_test_mm
def _test_mm(n, m, p, dtype, genf): # helper function def matrixmultiply(mat1, mat2): n = mat1.size(0) m = mat1.size(1) p = mat2.size(1) dtype_ = torch.float if dtype == torch.half else dtype if dtype == torch.half: mat1 = mat1.float() mat2 = mat2.float() res = torch.zeros(n, p, dtype=dtype_, device=device) for i, j in iter_indices(res): res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m)) return res.half() if dtype == torch.half else res # contiguous case mat1 = genf(n, m) mat2 = genf(m, p) res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # non contiguous case 1 mat1 = genf(n, m) mat2 = genf(p, m).t() res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # non contiguous case 2 mat1 = genf(m, n).t() mat2 = genf(m, p) res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # non contiguous case 3 mat1 = genf(m, n).t() mat2 = genf(p, m).t() res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # test with zero stride mat1 = genf(n, m) mat2 = genf(m, 1).expand(m, p) res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # explicitly exercise the _out variant in torch.mm(). # contiguous case mat1 = genf(n, m) mat2 = genf(m, p) res = genf(n, p) torch.mm(mat1, mat2, out=res) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # explicitly exercise the _out variant in torch.mm(). # non contiguous case 3 mat1 = genf(m, n).t() mat2 = genf(p, m).t() res = genf(n, p) torch.mm(mat1, mat2, out=res) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
matrixmultiply
def matrixmultiply(mat1, mat2): n = mat1.size(0) m = mat1.size(1) p = mat2.size(1) dtype_ = torch.float if dtype == torch.half else dtype if dtype == torch.half: mat1 = mat1.float() mat2 = mat2.float() res = torch.zeros(n, p, dtype=dtype_, device=device) for i, j in iter_indices(res): res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m)) return res.half() if dtype == torch.half else res # contiguous case mat1 = genf(n, m) mat2 = genf(m, p) res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # non contiguous case 1 mat1 = genf(n, m) mat2 = genf(p, m).t() res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # non contiguous case 2 mat1 = genf(m, n).t() mat2 = genf(m, p) res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # non contiguous case 3 mat1 = genf(m, n).t() mat2 = genf(p, m).t() res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # test with zero stride mat1 = genf(n, m) mat2 = genf(m, 1).expand(m, p) res = torch.mm(mat1, mat2) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # explicitly exercise the _out variant in torch.mm(). # contiguous case mat1 = genf(n, m) mat2 = genf(m, p) res = genf(n, p) torch.mm(mat1, mat2, out=res) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2) # explicitly exercise the _out variant in torch.mm(). # non contiguous case 3 mat1 = genf(m, n).t() mat2 = genf(p, m).t() res = genf(n, p) torch.mm(mat1, mat2, out=res) res2 = matrixmultiply(mat1, mat2) self.assertEqual(res, res2)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
genf_int
def genf_int(x, y): return torch.randint(0, 100, (x, y), dtype=dtype, device=device)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv_empty_channel
def test_conv_empty_channel(self, device, dtype): in_channels = 0 mod = torch.nn.Conv1d(in_channels, 8, 2, stride=2, dtype=dtype).to(device) inp = torch.randn(2, 0, 15, device=device, dtype=dtype) _test_module_empty_input(self, mod, inp, check_size=False) with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"): inp = torch.randn(2, 1, 0, device=device, dtype=dtype) mod(inp) mod = torch.nn.Conv2d(in_channels, 33, 3, stride=2, dtype=dtype).to(device) inp = torch.randn(2, 0, 50, 100, device=device, dtype=dtype) _test_module_empty_input(self, mod, inp, check_size=False) with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"): inp = torch.randn(2, 1, 40, 0, device=device, dtype=dtype) mod(inp) mod = torch.nn.Conv3d(in_channels, 33, 3, stride=2, dtype=dtype).to(device) inp = torch.randn(2, 0, 50, 20, 40, device=device, dtype=dtype) _test_module_empty_input(self, mod, inp, check_size=False) with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"): inp = torch.randn(2, 1, 50, 0, 40, device=device, dtype=dtype) mod(inp)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_group_conv_empty
def test_group_conv_empty(self, device): mod = torch.nn.Conv2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to( device ) inp = torch.randn(0, 4, 4, 4, device=device) _test_module_empty_input(self, mod, inp, check_size=False)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_group_convTranspose_empty
def test_group_convTranspose_empty(self, device): mod = torch.nn.ConvTranspose2d( 4, 4, stride=2, kernel_size=3, padding=1, groups=4 ).to(device) inp = torch.randn(0, 4, 4, 4, device=device) _test_module_empty_input(self, mod, inp, check_size=False)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_convTranspose_empty
def test_convTranspose_empty(self, device): mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1).to( device ) inp = torch.randn(0, 4, 4, 4, device=device) _test_module_empty_input(self, mod, inp, check_size=False)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv_large_nosplit
def test_conv_large_nosplit(self, device): dtype = torch.half conv1 = nn.Conv2d(2, 2, 8, 8).to(device).to(dtype) input_large = torch.randn(1, 2, 1024, 1024 * 1024, dtype=dtype, device=device) conv1(input_large) conv2 = torch.nn.Conv2d(1, 1024, 1, 1).to(device).to(dtype) input_large = torch.randn(1, 1, 2048, 1024, dtype=dtype, device=device) conv2(input_large)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv_noncontig_weights
def test_conv_noncontig_weights(self, device): for dim in (1, 2, 3): for grouped in (False, True): nc = 3 groups = 3 if grouped else 1 w = torch.randn([3] * dim, device=device) w = w.expand([nc, int(nc / groups)] + list(w.shape)) w = w.detach().requires_grad_() x = torch.randn( [1, nc] + ([5] * dim), device=device, requires_grad=True ) y = getattr(F, f"conv{dim}d")(x, w, groups=groups) y.sum().backward() y = getattr(F, f"conv_transpose{dim}d")(x, w, groups=groups) y.sum().backward()
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv_noncontig_weights_and_bias
def test_conv_noncontig_weights_and_bias(self, device): for bias in [True, False]: conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=bias).to( device, torch.float ) input_nc = torch.randn( (1, 3, 224, 224, 2), device=device, dtype=torch.float )[:, :, :, :, 1] input_c = input_nc.contiguous() weight_nc = torch.randn((64, 3, 7, 7, 2), device=device, dtype=torch.float)[ :, :, :, :, 1 ] conv1.weight = nn.Parameter(weight_nc) weight_c = conv1.weight.contiguous() if bias: bias_nc = torch.randn((64, 2), device=device, dtype=torch.float)[:, 1] conv1.bias = nn.Parameter(bias_nc) bias_c = conv1.bias.contiguous() out1 = conv1(input_nc) conv1.weight = nn.Parameter(weight_c) if bias: conv1.bias = nn.Parameter(bias_c) out2 = conv1(input_c) self.assertEqual(out1, out2)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv_transposed_large
def test_conv_transposed_large(self, device): dtype = torch.half if self.device_type == "cuda" else torch.float conv = nn.ConvTranspose2d(1, 1, 1, 1, bias=False).to(device).to(dtype) input_large = torch.randn(4096, 1, 512, 1024, dtype=dtype, device=device) ret = conv(input_large) maxdiff0 = ( (ret.narrow(0, 0, 1024) - conv(input_large.narrow(0, 0, 1024))) .abs_() .max() .item() ) maxdiff1 = ( (ret.narrow(0, 1024, 1024) - conv(input_large.narrow(0, 1024, 1024))) .abs_() .max() .item() ) maxdiff2 = ( (ret.narrow(0, 2048, 1024) - conv(input_large.narrow(0, 2048, 1024))) .abs_() .max() .item() ) maxdiff3 = ( (ret.narrow(0, 3072, 1024) - conv(input_large.narrow(0, 3072, 1024))) .abs_() .max() .item() ) self.assertEqual(maxdiff0, 0) self.assertEqual(maxdiff1, 0) self.assertEqual(maxdiff2, 0) self.assertEqual(maxdiff3, 0)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_conv.py
test_conv_large
def test_conv_large(self, device): dtype = torch.half if self.device_type == "cuda" else torch.float conv = nn.Conv2d(2, 2, 8, 8, bias=False).to(device).to(dtype) input_large = torch.randn(4097, 2, 512, 512, dtype=dtype, device=device) ret = conv(input_large) self.assertEqual(ret[:2048], conv(input_large[:2048])) self.assertEqual(ret[2048:4096], conv(input_large[2048:4096])) self.assertEqual(ret[4096:], conv(input_large[4096:])) conv.zero_grad() ret.view(4097, -1).max(dim=1).values.sum().backward() del ret grad1 = conv.weight.grad.detach().clone() conv.zero_grad() conv(input_large[:2048]).view(2048, -1).max(dim=1).values.sum().backward() conv(input_large[2048:4096]).view(2048, -1).max(dim=1).values.sum().backward() conv(input_large[4096:]).view(1, -1).max(dim=1).values.sum().backward() grad2 = conv.weight.grad.detach().clone() scale = 1 / grad2.abs().mean() grad1 = grad1 * scale grad2 = grad2 * scale self.assertEqual(grad1, grad2, atol=5e-2, rtol=5e-3)
import itertools import math import unittest from itertools import product import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F from torch._C._dynamo.guards import assert_size_stride from torch.testing import make_tensor from torch.testing._internal.common_cuda import tf32_is_not_fp32 from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyXPU, ) from torch.testing._internal.common_dtype import floating_types_and from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase from torch.testing._internal.common_utils import ( dtype2prec_DONTUSE, gradcheck, gradgradcheck, parametrize as parametrize_test, run_tests, set_default_dtype, TEST_SCIPY, TEST_WITH_ROCM, ) AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() import scipy.ndimage import scipy.signal class TestConvolutionNNDeviceType(NNTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
genf_bfloat
def genf_bfloat(x, y): return torch.randn(x, y, dtype=torch.float32, device=device).to(dtype) * 0.1
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
genf_float
def genf_float(x, y): return torch.randn(x, y, dtype=dtype, device=device)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
genf_Half
def genf_Half(x, y): return torch.randn(x, y, dtype=dtype, device=device) for n, m, p in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]: if (dtype == torch.int32) or (dtype == torch.int64): genf = genf_int elif dtype == torch.bfloat16: genf = genf_bfloat elif dtype == torch.half: genf = genf_Half else: genf = genf_float _test_mm(n, m, p, dtype, genf)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
test_bmm
def test_bmm(self, device, dtype): batch_sizes = [1, 10] M, N, O = 23, 15, 12 numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32 def invert_perm(p): d = {x: i for i, x in enumerate(p)} return (d[0], d[1], d[2]) def generate_inputs(num_batches): # transposed tensors for perm1, perm2 in itertools.product( itertools.permutations((0, 1, 2)), repeat=2 ): b1 = make_tensor( (num_batches, M, N), dtype=dtype, device=device, low=-0.1, high=0.1 ) b2 = make_tensor( (num_batches, N, O), dtype=dtype, device=device, low=-0.1, high=0.1 ) b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1)) b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2)) yield b1, b2 # broadcasting tensors for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6): shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1) shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1) b1 = make_tensor( shape1, dtype=dtype, device=device, low=-0.1, high=0.1 ).expand(num_batches, M, N) b2 = make_tensor( shape2, dtype=dtype, device=device, low=-0.1, high=0.1 ).expand(num_batches, N, O) yield b1, b2 # zero-sized tensors for z1, z2, z3, z4 in itertools.product((True, False), repeat=4): shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0) shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0) b1 = torch.randn(shape1, dtype=dtype, device=device) b2 = torch.randn(shape2, dtype=dtype, device=device) yield b1, b2 for num_batches in batch_sizes: for (b1, b2), perm3 in itertools.product( generate_inputs(num_batches), itertools.permutations((0, 1, 2)) ): res1 = torch.bmm(b1, b2) res2 = ( torch.full( (num_batches, M, O), math.nan, dtype=dtype, device=device ) .permute(perm3) .contiguous() .permute(invert_perm(perm3)) ) torch.bmm(b1, b2, out=res2) expect = torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ).to(device=device, dtype=dtype) self.assertEqual(expect, res1) self.assertEqual(expect, res2) if self.device_type == "cuda": # check that mixed arguments are rejected self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu())) self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2)) self.assertRaises( RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu()) )
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
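torch.bmm itself does not broadcast; the "broadcasting tensors" cases above materialize the broadcast with .expand before the call. A minimal sketch (sizes are illustrative) of that pattern and of the matmul-style reference it is compared against:

import torch

num_batches, M, N, O = 4, 3, 5, 2
b1 = torch.randn(num_batches, M, N)
b2 = torch.randn(1, N, O).expand(num_batches, N, O)  # size-1 batch expanded, stride 0

res = torch.bmm(b1, b2)
ref = b1 @ b2  # batched matmul gives the same per-batch products
assert res.shape == (num_batches, M, O)
assert torch.allclose(res, ref, atol=1e-6)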
torch
test/xpu/test_gemm.py
invert_perm
def invert_perm(p): d = {x: i for i, x in enumerate(p)} return (d[0], d[1], d[2])
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
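The helper returns the inverse of a length-3 permutation, which the tests use to permute a tensor, force a copy with .contiguous(), and then permute back so the values are unchanged but the memory layout is not. A short sketch of that round trip:

import torch

def invert_perm(p):
    d = {x: i for i, x in enumerate(p)}
    return (d[0], d[1], d[2])

t = torch.randn(2, 3, 4)
p = (2, 0, 1)
roundtrip = t.permute(p).contiguous().permute(invert_perm(p))
assert torch.equal(roundtrip, t)          # same values
assert roundtrip.stride() != t.stride()   # different memory layout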
torch
test/xpu/test_gemm.py
generate_inputs
def generate_inputs(num_batches): # transposed tensors for perm1, perm2 in itertools.product( itertools.permutations((0, 1, 2)), repeat=2 ): b1 = make_tensor( (num_batches, M, N), dtype=dtype, device=device, low=-0.1, high=0.1 ) b2 = make_tensor( (num_batches, N, O), dtype=dtype, device=device, low=-0.1, high=0.1 ) b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1)) b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2)) yield b1, b2 # broadcasting tensors for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6): shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1) shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1) b1 = make_tensor( shape1, dtype=dtype, device=device, low=-0.1, high=0.1 ).expand(num_batches, M, N) b2 = make_tensor( shape2, dtype=dtype, device=device, low=-0.1, high=0.1 ).expand(num_batches, N, O) yield b1, b2 # zero-sized tensors for z1, z2, z3, z4 in itertools.product((True, False), repeat=4): shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0) shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0) b1 = torch.randn(shape1, dtype=dtype, device=device) b2 = torch.randn(shape2, dtype=dtype, device=device) yield b1, b2 for num_batches in batch_sizes: for (b1, b2), perm3 in itertools.product( generate_inputs(num_batches), itertools.permutations((0, 1, 2)) ): res1 = torch.bmm(b1, b2) res2 = ( torch.full( (num_batches, M, O), math.nan, dtype=dtype, device=device ) .permute(perm3) .contiguous() .permute(invert_perm(perm3)) ) torch.bmm(b1, b2, out=res2) expect = torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ).to(device=device, dtype=dtype) self.assertEqual(expect, res1) self.assertEqual(expect, res2) if self.device_type == "cuda": # check that mixed arguments are rejected self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu())) self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2)) self.assertRaises( RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu()) )
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
invert_perm
def invert_perm(p): d = {x: i for i, x in enumerate(p)} return (d[0], d[1], d[2])
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
generate_tensor
def generate_tensor(): numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32 # transposed tensors for perm1, perm2 in itertools.product( itertools.permutations((0, 1, 2)), repeat=2 ): for perm3 in itertools.permutations((0, 1)): b1 = ( make_tensor( (num_batches, M, N), dtype=dtype, device=device, low=-1, high=1, ) * 0.1 ) b2 = ( make_tensor( (num_batches, N, O), dtype=dtype, device=device, low=-1, high=1, ) * 0.1 ) b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1)) b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2)) ref = ( torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ) .to(device=device, dtype=dtype) .sum(0) ) out_tensor = ( torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3) ) yield b1, b2, ref, out_tensor # broadcasting tensors for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6): shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1) shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1) b1 = ( make_tensor( shape1, dtype=dtype, device=device, low=-1, high=1 ).expand(num_batches, M, N) * 0.1 ) b2 = ( make_tensor( shape2, dtype=dtype, device=device, low=-1, high=1 ).expand(num_batches, N, O) * 0.1 ) ref = ( torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ) .to(device=device, dtype=dtype) .sum(0) ) out_tensor = torch.zeros_like(ref) yield b1, b2, ref, out_tensor # zero-sized tensors for z1, z2, z3, z4 in itertools.product((True, False), repeat=4): shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0) shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0) b1 = ( make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1) * 0.1 ) b2 = ( make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1) * 0.1 ) ref = ( torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ) .to(device=device, dtype=dtype) .sum(0) ) out_tensor = torch.zeros_like(ref) yield b1, b2, ref, out_tensor for b1, b2, ref, out_tensor in generate_tensor(): self._test_addbmm_baddbmm("addbmm", b1, b2, ref, out_tensor)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
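The reference built here (numpy matmul followed by .sum(0)) encodes what addbmm computes: unlike bmm or baddbmm, it reduces over the batch dimension. A minimal sketch of those semantics with illustrative sizes:

import torch

num_batches, M, N, O = 3, 4, 5, 2
b1 = torch.randn(num_batches, M, N)
b2 = torch.randn(num_batches, N, O)
inp = torch.zeros(M, O)

out = torch.addbmm(inp, b1, b2)        # beta=1, alpha=1 by default
ref = (b1 @ b2).sum(dim=0)             # per-batch matmul, then sum over batches
assert out.shape == (M, O)
assert torch.allclose(out, ref, atol=1e-5)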
torch
test/xpu/test_gemm.py
_test
def _test(row_major, incx, incy, lda_tail): if row_major: a_storage = torch.full( (o, s + lda_tail), float("nan"), device=device, dtype=dtype ) else: a_storage = torch.full( (s, o + lda_tail), float("nan"), device=device, dtype=dtype ).permute(1, 0) a = a_storage[:o, :s].copy_(a_data) x_storage = torch.full((s, incx), float("nan"), device=device, dtype=dtype) x = x_storage[:, 0].copy_(x_data) y_storage = torch.full((o, incy), float("nan"), device=device, dtype=dtype) y = y_storage[:, 0].copy_(y_data) self._test_addmm_addmv(torch.addmv, y, a, x) for row_major, incx, incy, lda_tail in itertools.product( (False, True), (1, 2), (1, 2), (0, 1) ): _test(row_major, incx, incy, lda_tail)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
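The helper above plants NaNs around the data and varies increments and leading dimensions so that addmv is exercised on non-contiguous operands. A small sketch (shapes and tolerance are illustrative) of the invariant being checked: a non-contiguous view with the same values must give the same result as the contiguous matrix.

import torch

o, s = 4, 6
A = torch.randn(o, s)
x = torch.randn(s)
y = torch.randn(o)

ref = torch.addmv(y, A, x)                 # y + A @ x
A_colmajor = A.t().contiguous().t()        # same values, column-major layout
assert not A_colmajor.is_contiguous()
assert torch.allclose(torch.addmv(y, A_colmajor, x), ref, atol=1e-5)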
torch
test/xpu/test_gemm.py
test_corner_cases_of_cublasltmatmul
def test_corner_cases_of_cublasltmatmul(self, device, dtype): # common case M = torch.randn(128, device=device).to(dtype) m1 = torch.randn(2048, 2400, device=device).to(dtype) m2 = torch.randn(128, 2400, device=device).to(dtype) torch.nn.functional.linear(m1, m2, M) # Ntrans_B has ld >> rows m1 = torch.rand([128, 2400]).to(dtype).to(device).t() m2 = torch.rand([2048, 25272]).to(dtype).to(device).t()[21940:24340] M = torch.rand([128]).to(dtype).to(device) torch.addmm(M, m2.t(), m1) # trans_A has ld >> rows m1 = torch.rand([128, 25272]).to(dtype).to(device)[:, 21940:24340].t() m2 = torch.randn(2048, 2400, device=device).to(dtype) M = torch.rand([128]).to(dtype).to(device) torch.addmm(M, m2, m1) # large tensor dim > 65535 M = torch.randn(16, device=device).to(dtype) m1 = torch.randn(32, 131071, device=device).to(dtype) m2 = torch.randn(16, 131071, device=device).to(dtype) torch.nn.functional.linear(m1, m2, M)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
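These corner cases push unusual leading dimensions and very large sizes through the GEMM backend, but the mathematical contract is just the linear/addmm identity. A minimal CPU-sized sketch (small illustrative shapes, nothing backend-specific):

import torch
import torch.nn.functional as F

x = torch.randn(8, 16)
W = torch.randn(4, 16)
b = torch.randn(4)

out = F.linear(x, W, b)          # x @ W.t() + b
ref = torch.addmm(b, x, W.t())
assert torch.allclose(out, ref, atol=1e-5)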
torch
test/xpu/test_gemm.py
test_baddbmm
def test_baddbmm(self, device, dtype): num_batches = 10 M, N, O = 12, 8, 50 def invert_perm(p): d = {x: i for i, x in enumerate(p)} return (d[0], d[1], d[2]) def generate_tensor(): numpy_dtype = ( dtype if dtype not in [torch.bfloat16, torch.half] else torch.float32 ) # transposed tensors for perm1, perm2, perm3 in itertools.product( itertools.permutations((0, 1, 2)), repeat=3 ): b1 = make_tensor( (num_batches, M, N), dtype=dtype, device=device, low=-1, high=1 ) b2 = make_tensor( (num_batches, N, O), dtype=dtype, device=device, low=-1, high=1 ) b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1)) b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2)) ref = torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ).to(device=device, dtype=dtype) out_tensor = torch.zeros_like(ref) out_tensor = ( out_tensor.permute(perm3).contiguous().permute(invert_perm(perm3)) ) yield b1, b2, ref, out_tensor # broadcasting tensors for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6): shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1) shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1) b1 = make_tensor( shape1, dtype=dtype, device=device, low=-1, high=1 ).expand(num_batches, M, N) b2 = make_tensor( shape2, dtype=dtype, device=device, low=-1, high=1 ).expand(num_batches, N, O) ref = torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ).to(device=device, dtype=dtype) out_tensor = torch.zeros_like(ref) yield b1, b2, ref, out_tensor # zero-sized tensors for z1, z2, z3, z4 in itertools.product((True, False), repeat=4): shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0) shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0) b1 = make_tensor(shape1, dtype=dtype, device=device, low=-2, high=2) b2 = make_tensor(shape2, dtype=dtype, device=device, low=-2, high=2) ref = torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ).to(device=device, dtype=dtype) out_tensor = torch.zeros_like(ref) yield b1, b2, ref, out_tensor for b1, b2, ref, out_tensor in generate_tensor(): self._test_addbmm_baddbmm("baddbmm", b1, b2, ref, out_tensor)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
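For contrast with addbmm, baddbmm keeps the batch dimension: each batch i yields beta*input[i] + alpha*(b1[i] @ b2[i]). A minimal sketch with illustrative sizes and scaling factors:

import torch

num_batches, M, N, O = 3, 4, 5, 2
b1 = torch.randn(num_batches, M, N)
b2 = torch.randn(num_batches, N, O)
inp = torch.randn(num_batches, M, O)

out = torch.baddbmm(inp, b1, b2, beta=0.5, alpha=2.0)
ref = 0.5 * inp + 2.0 * (b1 @ b2)
assert out.shape == (num_batches, M, O)
assert torch.allclose(out, ref, atol=1e-5)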
torch
test/xpu/test_gemm.py
invert_perm
def invert_perm(p): d = {x: i for i, x in enumerate(p)} return (d[0], d[1], d[2])
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
generate_tensor
def generate_tensor(): numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32 # transposed tensors for perm1, perm2 in itertools.product( itertools.permutations((0, 1, 2)), repeat=2 ): for perm3 in itertools.permutations((0, 1)): b1 = ( make_tensor( (num_batches, M, N), dtype=dtype, device=device, low=-1, high=1, ) * 0.1 ) b2 = ( make_tensor( (num_batches, N, O), dtype=dtype, device=device, low=-1, high=1, ) * 0.1 ) b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1)) b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2)) ref = ( torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ) .to(device=device, dtype=dtype) .sum(0) ) out_tensor = ( torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3) ) yield b1, b2, ref, out_tensor # broadcasting tensors for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6): shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1) shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1) b1 = ( make_tensor( shape1, dtype=dtype, device=device, low=-1, high=1 ).expand(num_batches, M, N) * 0.1 ) b2 = ( make_tensor( shape2, dtype=dtype, device=device, low=-1, high=1 ).expand(num_batches, N, O) * 0.1 ) ref = ( torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ) .to(device=device, dtype=dtype) .sum(0) ) out_tensor = torch.zeros_like(ref) yield b1, b2, ref, out_tensor # zero-sized tensors for z1, z2, z3, z4 in itertools.product((True, False), repeat=4): shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0) shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0) b1 = ( make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1) * 0.1 ) b2 = ( make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1) * 0.1 ) ref = ( torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy() ) .to(device=device, dtype=dtype) .sum(0) ) out_tensor = torch.zeros_like(ref) yield b1, b2, ref, out_tensor for b1, b2, ref, out_tensor in generate_tensor(): self._test_addbmm_baddbmm("addbmm", b1, b2, ref, out_tensor)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
test_1_sized_with_0_strided
def test_1_sized_with_0_strided(self, device, dtype): a = make_tensor((8, 1, 64), dtype=dtype, device=device) a_strided = torch.as_strided(a, size=[8, 1, 64], stride=[64, 0, 1]) b = make_tensor((8, 64, 512), dtype=dtype, device=device) b_strided = torch.as_strided(b, size=[8, 64, 512], stride=[64, 1, 512]) res = torch.bmm(a_strided, b_strided) expect = torch.from_numpy(a_strided.cpu().numpy() @ b_strided.cpu().numpy()).to( device=device, dtype=dtype ) self.assertEqual(expect, res)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
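A size-1 dimension may legally carry stride 0, and bmm has to treat such an as_strided view exactly like its contiguous original. A reduced sketch of the same check:

import torch

a = torch.randn(2, 1, 8)                                     # contiguous, strides (8, 8, 1)
a0 = torch.as_strided(a, size=[2, 1, 8], stride=[8, 0, 1])   # stride 0 on the size-1 dim
b = torch.randn(2, 8, 3)

assert torch.allclose(torch.bmm(a0, b), torch.bmm(a, b), atol=1e-6)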
torch
test/xpu/test_gemm.py
_select_broadcastable_dims
def _select_broadcastable_dims(self, dims_full=None): # select full dimensionality if dims_full is None: dims_full = [] ndims = random.randint(1, 4) dims_full = [random.randint(1, 8) for _ in range(ndims)] else: ndims = len(dims_full) # select actual dimensions for ops: # larger: full ndims, individual sizes may be reduced # smaller: possibly reduced ndims, sizes may be reduced smaller_ndims = random.randint(1, ndims) dims_small = [] dims_large = [] for i in range(ndims - 1, -1, -1): j = random.randint(1, 3) if j == 1: # no reduced singleton dimension ds = dims_full[i] dl = dims_full[i] elif j == 2: # larger may have reduced singleton dimension ds = dims_full[i] dl = 1 if len(dims_small) < smaller_ndims else dims_full[i] elif j == 3: # smaller may have reduced singleton dimension ds = 1 dl = dims_full[i] dims_large = [dl] + dims_large if len(dims_small) < smaller_ndims: dims_small = [ds] + dims_small return (dims_small, dims_large, dims_full)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/xpu/test_gemm.py
dims_full_for_fn
def dims_full_for_fn(): if fn == "baddbmm": return ( [batch_dim, n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim], ) elif fn == "addbmm": return ( [n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim], ) elif fn == "addmm": return ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim]) elif fn == "addmv": return ([n_dim], [n_dim, m_dim], [m_dim]) elif fn == "addr": return ([n_dim, m_dim], [n_dim], [m_dim]) else: raise AssertionError("unknown function") (t0_dims_full, t1_dims, t2_dims) = dims_full_for_fn() (t0_dims_small, _, _) = self._select_broadcastable_dims(t0_dims_full) t0_small = torch.randn(*t0_dims_small, device=device).float() t1 = torch.randn(*t1_dims, device=device).float() t2 = torch.randn(*t2_dims, device=device).float() t0_full = t0_small.expand(*t0_dims_full).to(device) fntorch = getattr(torch, fn) r0 = fntorch(t0_small, t1, t2) r1 = fntorch(t0_full, t1, t2) self.assertEqual(r0, r1)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
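The shapes produced by this helper drive a broadcasting check: the self/bias argument of the addmm-family ops may be any tensor broadcastable to the output shape, and the result must match passing the explicitly expanded tensor. A minimal sketch for addmm (illustrative sizes):

import torch

n, m, p = 4, 5, 3
t0_small = torch.randn(1, p)             # broadcastable to (n, p)
t1 = torch.randn(n, m)
t2 = torch.randn(m, p)

r0 = torch.addmm(t0_small, t1, t2)
r1 = torch.addmm(t0_small.expand(n, p), t1, t2)
assert torch.allclose(r0, r1, atol=1e-6)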
torch
test/xpu/test_gemm.py
test_strided_mm_bmm
def test_strided_mm_bmm(self, device, dtype): # Tests strided view case with stride smaller than corresponding dimension size x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype, device=device) new_shape = [2, 2, 2] new_stride = [3, 1, 1] sx = torch.as_strided(x, size=new_shape, stride=new_stride) torch_fn = lambda x: torch.bmm(x, x) # noqa: E731 np_fn = lambda x: np.matmul(x, x) # noqa: E731 self.compare_with_numpy(torch_fn, np_fn, sx) torch_fn = lambda x: torch.mm(x, x) # noqa: E731 self.compare_with_numpy(torch_fn, np_fn, sx[0])
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
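compare_with_numpy is not shown in this record; a self-contained sketch of the same idea, building an overlapping strided view and checking torch against numpy on identical values:

import numpy as np
import torch

x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
sx = torch.as_strided(x, size=[2, 2, 2], stride=[3, 1, 1])   # windows overlap in memory

res = torch.bmm(sx, sx)
ref = torch.from_numpy(np.matmul(sx.numpy(), sx.numpy()))
assert torch.allclose(res, ref, atol=1e-6)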
torch
test/xpu/test_gemm.py
test_mm_empty_inputs_mixed_dtype_errors
def test_mm_empty_inputs_mixed_dtype_errors(self, device): a = torch.randint(0, 10, [1, 10], dtype=torch.int16, device=device) b = torch.randn(10, 20, dtype=torch.float32, device=device) with self.assertRaisesRegex( RuntimeError, "expected .* and .* to have the same dtype, but got:" ): torch.mm(a, b)
import itertools import math import random from functools import partial from itertools import product import numpy as np import torch from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, precisionOverride, ) from torch.testing._internal.common_utils import iter_indices, run_tests, TestCase class TestBasicGEMM(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
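A minimal sketch of the error contract asserted above (the exact message wording varies across builds, so only the exception type is relied on here):

import torch

a = torch.randint(0, 10, (1, 10), dtype=torch.int16)
b = torch.randn(10, 20, dtype=torch.float32)
try:
    torch.mm(a, b)
except RuntimeError as e:
    print("rejected as expected:", e)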