"""Example code to do convolution."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple3d
from tvm.topi.utils import get_const_tuple
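# Dispatch table: maps a target key to its (compute, schedule) pair,
# consumed by tvm.topi.testing.dispatch in check_target below.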
_conv3d_ncdhw_implement = {
"generic": (topi.nn.conv3d_ncdhw, topi.generic.schedule_conv3d_ncdhw),
"cpu": (topi.x86.conv3d_ncdhw, topi.x86.schedule_conv3d_ncdhw),
"gpu": (topi.cuda.conv3d_ncdhw, topi.cuda.schedule_conv3d_ncdhw),
}
def verify_conv3d_ncdhw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
groups=1,
add_bias=False,
add_relu=False,
):
if isinstance(kernel, (tuple, list)):
if len(kernel) == 3:
kernel_d = kernel[0]
kernel_h = kernel[1]
kernel_w = kernel[2]
else:
raise ValueError("Size of kernel can only be 3")
elif isinstance(kernel, int):
kernel_d = kernel_h = kernel_w = kernel
else:
raise ValueError("Unknown kernel option %s" % kernel)
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
padding, (kernel_d, kernel_h, kernel_w)
)
padding_sum = pad_front + pad_back + pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (
batch,
in_channel,
in_size,
num_filter,
kernel_d,
kernel_h,
kernel_w,
stride,
padding_sum,
dilation,
)
)
in_depth = in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
W = te.placeholder((num_filter, in_channel // groups, kernel_d, kernel_h, kernel_w), name="W")
bias = te.placeholder((num_filter, 1, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
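# Cache the randomly generated reference data on disk so repeated runs reuse it.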
@memoize("topi.tests.test_topi_conv3d_ncdhw.verify_conv3d_ncdhw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding, groups)
if add_bias:
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv3d_ncdhw_implement)
with tvm.target.Target(target):
C = fcompute(
A,
W,
(stride, stride, stride),
padding,
(dilation, dilation, dilation),
groups,
dtype,
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel_d,
kernel_h,
kernel_w,
stride,
padding_sum,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel_d,
kernel_h,
kernel_w,
stride,
padding_sum,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4, atol=1e-6)
for target, dev in tvm.testing.enabled_targets():
with autotvm.tophub.context(target):
check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv3d_ncdhw():
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, 0)
verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, 0)
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, 1)
verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, 1)
verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_relu=True)
verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_bias=True)
verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_bias=True, add_relu=True)
verify_conv3d_ncdhw(1, 64, 56, 3, 3, 1, 1, dilation=2)
verify_conv3d_ncdhw(4, 64, 56, 5, 3, 1, 1)
verify_conv3d_ncdhw(2, 2, 2, 2, 2, 2, 2)
verify_conv3d_ncdhw(3, 3, 3, 3, 3, 3, 3)
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, (0, 0, 0, 1, 1, 1))
verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, (2, 1, 2, 1, 2, 1))
verify_conv3d_ncdhw(1, 64, 56, 3, 3, 1, (2, 2, 2, 1, 1, 1), dilation=2)
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, (0, 1, 1))
verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, (2, 1, 0))
verify_conv3d_ncdhw(1, 32, 32, 1, 3, 1, "VALID")
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, "VALID")
verify_conv3d_ncdhw(1, 32, 56, 16, (3, 5, 7), 2, (1, 2, 3))
verify_conv3d_ncdhw(1, 3, 56, 16, (3, 7, 7), 2, (1, 2, 3, 0, 3, 2))
verify_conv3d_ncdhw(1, 3, 56, 16, (3, 3, 7), 2, (1, 2, 3))
verify_conv3d_ncdhw(1, 3, 56, 16, (3, 7, 3), 2, (1, 3, 1))
verify_conv3d_ncdhw(1, 32, 32, 8, 1, 1, 0, groups=4)
verify_conv3d_ncdhw(1, 32, 32, 4, 1, 1, 0, groups=4)
verify_conv3d_ncdhw(1, 32, 32, 8, 1, 1, 1, groups=4)
verify_conv3d_ncdhw(1, 32, 32, 4, 1, 1, 1, groups=4)
if __name__ == "__main__":
test_conv3d_ncdhw()
"""Example code to do convolution.""" |
import os |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import topi |
import tvm.testing |
import tvm.topi.testing
from tvm.contrib.pickle_memoize |
import memoize
from tvm.topi.utils |
import get_const_tuple
_conv3d_ndhwc_implement = {
"generic": (topi.nn.conv3d_ndhwc, topi.generic.schedule_conv3d_ndhwc),
"cpu": (topi.x86.conv3d_ndhwc, topi.x86.schedule_conv3d_ndhwc),
"gpu": (topi.cuda.conv3d_ndhwc, topi.cuda.schedule_conv3d_ndhwc),
}
def verify_conv3d_ndhwc(
target,
dev,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
groups=1,
):
if isinstance(in_size, tuple):
in_depth, in_height, in_width = in_size
else:
in_depth = in_height = in_width = in_size
if isinstance(kernel, tuple):
kernel_depth, kernel_height, kernel_width = kernel
else:
kernel_depth = kernel_height = kernel_width = kernel
A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), name="A")
W = te.placeholder(
(kernel_depth, kernel_height, kernel_width, in_channel // groups, num_filter), name="W"
)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv3d_ndhwc.verify_ndhwc.v2")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
b_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding, groups)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv3d_ndhwc_implement)
with tvm.target.Target(target):
B = fcompute(A, W, stride, padding, dilation, groups, dtype)
s = fschedule([B])
dev = tvm.device(target, 0)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], target)
print(tvm.lower(s, [A, W, B], target))
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def test_conv3d_ndhwc(target, dev):
verify_conv3d_ndhwc(target, dev, 1, 16, 32, 16, 3, 1, "SAME")
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 32, 5, 2, "SAME")
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 64, 5, 2, "SAME")
verify_conv3d_ndhwc(target, dev, 1, 64, 32, 64, 3, 1, "VALID")
verify_conv3d_ndhwc(target, dev, 1, 64, 32, 64, 3, 1, "VALID")
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 32, 5, 2, "VALID")
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 64, 5, 2, "VALID")
verify_conv3d_ndhwc(target, dev, 1, 64, 32, 64, 3, 1, "SAME", dilation=2)
verify_conv3d_ndhwc(target, dev, 1, 1, (20, 256, 256), 32, (1, 3, 3), (1, 2, 2), "SAME")
verify_conv3d_ndhwc(target, dev, 1, 1, (20, 256, 256), 32, (1, 6, 6), (1, 2, 2), (0, 2, 2))
verify_conv3d_ndhwc(target, dev, 1, 4, (20, 256, 256), 8, (1, 5, 5), (1, 2, 2), (0, 2, 2))
verify_conv3d_ndhwc(target, dev, 1, 16, 32, 16, 3, 1, "SAME", groups=4)
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 32, 5, 2, "SAME", groups=4)
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 64, 5, 2, "SAME", groups=4)
if __name__ == "__main__":
test_conv3d_ndhwc()
"""Example code to do convolution.""" |
import numpy as np |
import tvm
from tvm |
import topi |
import tvm.topi.testing
from tvm |
import te
from tvm.contrib.pickle_memoize |
import memoize
from tvm.contrib |
import nvcc
from tvm.topi.nn.utils |
import get_pad_tuple3d
from tvm.topi.utils |
import get_const_tuple |
import tvm.testing
_conv3d_ndhwc_tensorcore_implement = {
"cuda": (topi.cuda.conv3d_ndhwc_tensorcore, topi.cuda.schedule_conv3d_ndhwc_tensorcore)
}
def verify_conv3d_ndhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
devices="cuda",
):
"""Test the conv3d with tensorcore for ndhwc layout"""
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
padding, (kernel, kernel, kernel)
)
padding_sum = pad_front + pad_top + pad_left + pad_back + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_depth = in_height = in_width = in_size
dtype = "float16"
A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), dtype, name="A")
W = te.placeholder((kernel, kernel, kernel, in_channel, num_filter), dtype, name="W")
bias = te.placeholder((1, 1, 1, 1, num_filter), dtype, name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
@memoize("topi.tests.test_topi_conv3d_ndhwc.verify_conv3d_ndhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
print("Running on target: %s" % |
device)
with tvm.target.Target(device):
fcompute, fschedule = tvm.topi.testing.dispatch(
device, _conv3d_ndhwc_tensorcore_implement
)
C = fcompute(A, W, stride, padding, dilation, 1, "float16")
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, atol=1e200, rtol=0.01)
check_device(devices)
@tvm.testing.requires_tensorcore
@tvm.testing.requires_cuda
def test_conv3d_ndhwc_tensorcore():
"""Test the conv3d with tensorcore for ndhwc layout"""
verify_conv3d_ndhwc(16, 16, 14, 16, 3, 1, 1)
verify_conv3d_ndhwc(16, 64, 7, 64, 7, 1, 3)
verify_conv3d_ndhwc(16, 32, 7, 32, 7, 1, 3)
verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_bias=True)
verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_relu=True)
verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_relu=True, add_bias=True)
verify_conv3d_ndhwc(16, 16, 17, 16, 7, 1, (3, 3, 3, 2, 2, 2))
verify_conv3d_ndhwc(16, 16, 17, 16, 7, 1, "SAME")
verify_conv3d_ndhwc(8, 16, 35, 32, 5, 1, "VALID")
verify_conv3d_ndhwc(16, 32, 16, 32, 3, 1, (1, 1, 1, 1, 1, 1))
verify_conv3d_ndhwc(16, 16, 12, 16, 3, 1, (1, 1, 1, 1, 1, 1))
if __name__ == "__main__":
test_conv3d_ndhwc_tensorcore()
"""Test code for transposed convolution.""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import topi |
import tvm.testing |
import tvm.topi.testing
from tvm.contrib.pickle_memoize |
import memoize
from tvm.topi.utils |
import get_const_tuple
_conv3d_transpose_ncdhw_implement = {
"generic": (topi.nn.conv3d_transpose_ncdhw, topi.generic.schedule_conv3d_transpose_ncdhw),
"cpu": (topi.x86.conv3d_transpose_ncdhw, topi.x86.schedule_conv3d_transpose_ncdhw),
"gpu": (topi.cuda.conv3d_transpose_ncdhw, topi.cuda.schedule_conv3d_transpose_ncdhw),
}
def verify_conv3d_transpose_ncdhw(
batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding
):
in_depth, in_height, in_width = in_size
kernel_depth, kernel_height, kernel_width = kernel
stride_depth, stride_height, stride_width = stride
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = padding
A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
W = te.placeholder(
(in_channel, num_filter, kernel_depth, kernel_height, kernel_width), name="W"
)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv3d_transpose.verify_conv3d_transpose_ncdhw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = tvm.topi.testing.conv3d_transpose_ncdhw_python(
a_np, w_np, stride, padding, output_padding
)
c_np = np.maximum(b_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(
target, _conv3d_transpose_ncdhw_implement
)
B = fcompute(
A,
W,
[stride_depth, stride_height, stride_width],
[pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right],
A.dtype,
output_padding,
)
C = topi.nn.relu(B)
s1 = fschedule([B])
s2 = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func1 = tvm.build(s1, [A, W, B], target)
func2 = tvm.build(s2, [A, W, C], target)
func1(a, w, b)
func2(a, w, c)
tvm.testing.assert_allclose(b.numpy(), b_np, atol=1e-4, rtol=1e-4)
tvm.testing.assert_allclose(c.numpy(), c_np, atol=1e-4, rtol=1e-4)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv3d_transpose_ncdhw():
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 1, (1, 1, 1), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 2, (3, 3, 3), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (3, 3, 3), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (3, 3, 3), (0, 0, 0, 0, 0, 0), (2, 2, 2)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (3, 3, 3), (0, 0, 0, 0, 0, 0), (1, 0, 2)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (2, 2, 2), (2, 2, 2), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 32, (5, 5, 5), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (5, 5, 5), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (5, 5, 5), (2, 2, 2), (1, 1, 1, 1, 1, 1), (1, 1, 1)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (3, 5, 7), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (3, 5, 5), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (3, 3, 7), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (3, 5, 3), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
if __name__ == "__main__":
test_conv3d_transpose_ncdhw()
"""Test code for 3d convolution with winograd.""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import autotvm
from tvm |
import topi |
import tvm.testing |
import tvm.topi.testing
from tvm.contrib.pickle_memoize |
import memoize
from tvm.topi.nn.utils |
import get_pad_tuple3d
from tvm.topi.utils |
import get_const_tuple
_conv3d_ncdhw_implement = {
"gpu": (topi.cuda.conv3d_ncdhw_winograd, topi.cuda.schedule_conv3d_ncdhw_winograd),
}
def verify_conv3d_ncdhw(
batch,
in_channel,
in_size,
num_filter,
depth_kernel,
space_kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
padding, (depth_kernel, space_kernel, space_kernel)
)
padding_sum = pad_front + pad_back + pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, space_kernel, stride, padding_sum, dilation)
)
in_depth = in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
W = te.placeholder((num_filter, in_channel, depth_kernel, space_kernel, space_kernel), name="W")
bias = te.placeholder((num_filter, 1, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv3d_ncdhw.verify_conv3d_ncdhw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
if add_bias:
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Runn |
ing on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ncdhw_implement)
with tvm.target.Target(device):
C = fcompute(
A, W, (stride, stride, stride), padding, (dilation, dilation, dilation), 1, dtype
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
space_kernel,
stride,
padding_sum,
dilation,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
space_kernel,
stride,
padding_sum,
dilation,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4, atol=1e-6)
for device in ["cuda"]:
with autotvm.tophub.context(device):
check_device(device)
@tvm.testing.requires_gpu
def test_conv3d_ncdhw():
verify_conv3d_ncdhw(1, 61, 20, 120, 3, 3, 1, 0)
verify_conv3d_ncdhw(1, 61, 20, 120, 1, 3, 1, 0)
verify_conv3d_ncdhw(1, 61, 20, 120, 5, 3, 1, 0)
verify_conv3d_ncdhw(1, 61, 20, 120, 5, 5, 1, 2)
verify_conv3d_ncdhw(1, 61, 20, 120, 1, 5, 1, 2)
verify_conv3d_ncdhw(1, 61, 20, 120, 7, 7, 1, 3)
verify_conv3d_ncdhw(1, 128, 12, 256, 3, 3, 1, 1)
verify_conv3d_ncdhw(1, 64, 12, 128, 3, 3, 1, 1)
verify_conv3d_ncdhw(1, 64, 12, 128, 3, 3, 1, 1, add_relu=True)
verify_conv3d_ncdhw(1, 64, 12, 128, 3, 3, 1, 1, add_relu=True, add_bias=True)
verify_conv3d_ncdhw(1, 64, 12, 128, 1, 3, 1, 1, add_relu=True, add_bias=True)
verify_conv3d_ncdhw(1, 16, 12, 16, 3, 3, 1, "VALID", dilation=2)
verify_conv3d_ncdhw(1, 16, 12, 16, 1, 3, 1, "VALID", dilation=2)
verify_conv3d_ncdhw(4, 32, 12, 64, 3, 3, 1, 1)
verify_conv3d_ncdhw(4, 32, 12, 64, 1, 3, 1, 1)
verify_conv3d_ncdhw(2, 2, 2, 2, 3, 3, 1, 2)
verify_conv3d_ncdhw(3, 3, 3, 3, 3, 3, 1, 3)
if __name__ == "__main__":
test_conv3d_ncdhw()
"""test of correlation operator in NCHW layout""" |
import sys |
import numpy as np |
import pytest |
import tvm |
import tvm.testing |
import tvm.topi.testing
from tvm |
import autotvm, te, topi
_correlation_implement = {
"generic": (topi.nn.correlation_nchw, topi.generic.schedule_correlation_nchw),
"gpu": (topi.cuda.correlation_nchw, topi.cuda.schedule_correlation_nchw),
}
(
data_shape,
kernel_size,
max_displacement,
stride1,
stride2,
pad_size,
is_multiply,
) = tvm.testing.parameters(
((1, 3, 10, 10), 1, 4, 1, 1, 4, True),
((1, 3, 10, 10), 1, 5, 1, 1, 5, True),
((5, 1, 4, 4), 3, 1, 2, 1, 2, True),
((5, 1, 6, 4), 3, 1, 2, 2, 2, False),
((5, 1, 11, 11), 5, 1, 1, 1, 2, False),
)
dtype = tvm.testing.parameter("float32")
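# Cached fixture: random inputs plus the numpy reference correlation output,
# reused across the targets this test is parametrized over.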
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
dtype, data_shape, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply
):
a_np = np.random.uniform(size=data_shape).astype(dtype)
b_np = np.random.uniform(size=data_shape).astype(dtype)
c_np = tvm.topi.testing.correlation_nchw_python(
a_np, b_np, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply
)
return a_np, b_np, c_np
def test_correlation_nchw(
target,
dev,
ref_data,
dtype,
kernel_size,
max_displacement,
stride1,
stride2,
pad_size,
is_multiply,
):
a_np, b_np, c_np = ref_data
A = te.placeholder(a_np.shape, name="data1", dtype=dtype)
B = te.placeholder(b_np.shape, name="data2", dtype=dtype)
fcompute, fschedule = tvm.topi.testing.dispatch(target, _correlation_implement)
with tvm.target.Target(target):
C = fcompute(A, B, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.empty(c_np.shape, dtype=dtype, device=dev)
func = tvm.build(s, [A, B, C], target)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
_deformable_conv2d_nchw_implement = {
"generic": (topi.nn.deformable_conv2d_nchw, topi.generic.schedule_deformable_conv2d_nchw),
"cuda": (topi.cuda.deformable_conv2d_nchw, topi.cuda.schedule_deformable_conv2d_nchw),
}
_deformable_conv2d_nhwc_implement = {
"generic": (topi.nn.deformable_conv2d_nhwc, topi.generic.schedule_deformable_conv2d_nhwc),
}
def verify_deformable_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
deformable_groups=1,
groups=1,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
deformable_groups,
groups,
)
)
A = te.placeholder((batch, in_channel, in_size, in_size), name="A")
out_size = (in_size - (kernel - 1) * dilation - 1 + 2 * padding) // stride + 1
Offset = te.placeholder(
(batch, deformable_groups * kernel * kernel * 2, out_size, out_size), name="offset"
)
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
offset_shape = get_const_tuple(Offset.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_deformable_conv2d_nchw.verify_deformable_conv2d_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
offset_np = np.random.randn(*offset_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np = tvm.topi.testing.deformable_conv2d_nchw_python(
a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
)
return a_np, offset_np, w_np, c_np
a_np, offset_np, w_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_nchw_implement)
with tvm.target.Target(device):
C = fcompute(A, Offset, W, stride, padding, dilation, deformable_groups, groups, dtype)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
offset = tvm.nd.array(offset_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.empty(c_np.shape, dtype=c_np.dtype, device=dev)
func = tvm.build(s, [A, Offset, W, C], device)
func(a, offset, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for device in ["llvm", "cuda"]:
check_device(device)
def verify_deformable_conv2d_nhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
deformable_groups=1,
groups=1,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
deformable_groups,
groups,
)
)
A = te.placeholder((batch, in_size, in_size, in_channel), name="A")
out_size = (in_size - (kernel - 1) * dilation - 1 + 2 * padding) // stride + 1
Offset = te.placeholder(
(batch, out_size, out_size, deformable_groups * kernel * kernel * 2), name="offset"
)
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
bias = te.placeholder((num_filter,), name="bias")
a_shape = get_const_tuple(A.shape)
offset_shape = get_const_tuple(Offset.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_deformable_conv2d_nchw.verify_deformable_conv2d_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
offset_np = np.random.randn(*offset_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np = tvm.topi.testing.deformable_conv2d_nhwc_python(
a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
)
return a_np, offset_np, w_np, c_np
a_np, offset_np, w_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_nhwc_implement)
with tvm.target.Target(device):
C = fcompute(A, Offset, W, stride, padding, dilation, deformable_groups, groups, dtype)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
offset = tvm.nd.array(offset_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.empty(c_np.shape, dtype=c_np.dtype, device=dev)
func = tvm.build(s, [A, Offset, W, C], device)
func(a, offset, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for device in ["llvm"]:
check_device(device)
@tvm.testing.uses_gpu
def test_deformable_conv2d_nchw():
verify_deformable_conv2d_nchw(1, 16, 7, 16, 1, 1, 0, deformable_groups=4)
verify_deformable_conv2d_nchw(1, 16, 7, 16, 3, 1, 1, dilation=2, deformable_groups=4)
verify_deformable_conv2d_nchw(1, 16, 7, 16, 3, 1, 2, dilation=2)
def test_deformable_conv2d_nhwc():
verify_deformable_conv2d_nhwc(1, 16, 7, 16, 1, 1, 0, deformable_groups=4)
verify_deformable_conv2d_nhwc(1, 16, 7, 16, 3, 1, 1, dilation=2, deformable_groups=4)
verify_deformable_conv2d_nhwc(1, 16, 7, 16, 3, 1, 2, dilation=2)
if __name__ == "__main__":
test_deformable_conv2d_nchw()
test_deformable_conv2d_nhwc()
"""Test code for dense operator""" |
import contextlib |
import numpy as np |
import pytest |
import sys |
import tvm |
import tvm.testing |
import tvm.topi.testing
from tvm |
import te, topi
from tvm.topi.utils |
import get_const_tuple
from common |
import Int8Fallback
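# tvm.testing.parameter/parameters expand each value combination below into its own test case.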
random_seed = tvm.testing.parameter(0)
use_bias = tvm.testing.parameter(True, False)
batch_size = tvm.testing.parameter(1, 2, 128)
in_dim, out_dim = tvm.testing.parameters((1024, 1000))
in_dtype, out_dtype = tvm.testing.parameters(
("float32", "float32"),
("float16", "float16"),
("int8", "int32"),
)
_dense_implementations = {
"generic": [(topi.nn.dense, topi.generic.schedule_dense)],
"cpu": [
(topi.x86.dense_nopack, topi.x86.schedule_dense_nopack),
(topi.x86.dense_pack, topi.x86.schedule_dense_pack),
],
"gpu": [
(topi.gpu.dense_small_batch, topi.gpu.schedule_dense_small_batch),
(topi.gpu.dense_large_batch, topi.gpu.schedule_dense_large_batch),
],
"mali": [(topi.mali.dense, topi.mali.schedule_dense)],
"bifrost": [(topi.bifrost.dense, topi.bifrost.schedule_dense)],
"hls": [(topi.nn.dense, topi.hls.schedule_dense)],
}
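# Reference data: numpy matmul of A against B^T, optional bias add, then relu;
# cached so every implementation and target compares against the same values.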
@tvm.testing.fixture(cache_return_value=True)
def dense_ref_data(random_seed, batch_size, in_dim, out_dim, use_bias, in_dtype, out_dtype):
np.random.seed(random_seed)
if "float" in in_dtype:
a_np = np.random.uniform(size=(batch_size, in_dim)).astype(in_dtype)
b_np = np.random.uniform(size=(out_dim, in_dim)).astype(in_dtype)
c_np = np.random.uniform(size=(out_dim,)).astype(out_dtype)
elif in_dtype == "int8":
a_np = np.random.randint(low=-128, high=127, size=(batch_size, in_dim)).astype(in_dtype)
b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(in_dtype)
c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(out_dtype)
else:
raise ValueError("No method to generate test data for data type '{}'".format(in_dtype))
matmul = np.dot(a_np.astype(out_dtype), b_np.T.astype(out_dtype))
if use_bias:
matmul += c_np
d_np = np.maximum(matmul, 0)
return (a_np, b_np, c_np, d_np)
def test_dense(
target,
dev,
batch_size,
in_dim,
out_dim,
use_bias,
dense_ref_data,
in_dtype,
out_dtype,
implementations=None,
):
target = tvm.target.Target(target)
if target.kind.name == "cuda":
if in_dtype == "int8" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
pytest.xfail("CUDA int8 intrinsics not available")
if in_dtype == "float16" and not tvm.contrib.nvcc.have_fp16(dev.compute_version):
pytest.xfail("CUDA float16 intrinsics not available")
if target.kind.name == "vulkan":
if in_dtype == "int8" and (
not target.attrs.get("supports_int8", False)
or not target.attrs.get("supports_8bit_buffer", False)
):
pytest.xfail("Vulkan int8 driver support not available")
if in_dtype == "float16" and (
not target.attrs.get("supports_float16", False)
or not target.attrs.get("supports_16bit_buffer", False)
):
pytest.xfail("Vulkan float16 driver support not available")
if (
target.kind.name not in ["llvm", "c"]
and len(set(target.keys) & set(_dense_implementations)) == 0
):
pytest.xfail("No implementation for tvm.topi.testing.dispatch to find")
if "int" in in_dtype:
tol = {"atol": 0, "rtol": 0}
elif in_dtype == "float32":
tol = {"rtol": 1e-5, "atol": 1e-5}
elif in_dtype == "float16":
tol = {"rtol": 5e-2, "atol": 1e-5}
A = te.placeholder((batch_size, in_dim), name="A", dtype=in_dtype)
B = te.placeholder((out_dim, in_dim), name="B", dtype=in_dtype)
C = te.placeholder((out_dim,), name="C", dtype=out_dtype)
a_np, b_np, c_np, d_np = dense_ref_data
if implementations is None:
implementations = tvm.topi.testing.dispatch(target, _dense_implementations)
for fcompute, fschedule in implementations:
with tvm.target.Target(target):
D = fcompute(A, B, C if use_bias else None, out_dtype)
D = topi.nn.relu(D)
s = fschedule([D])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=out_dtype), dev)
f = tvm.build(s, [A, B, C, D], target, name="dense")
f(a, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, **tol)
@pytest.mark.parametrize("target,in_dtype,out_dtype", [("cuda", "int8", "int32")])
def test_dense_cuda_int8(
target,
dev,
batch_size,
in_dim,
out_dim,
use_bias,
dense_ref_data,
in_dtype,
out_dtype,
):
implementations = [
(topi.cuda.dense_int8, topi.cuda.schedule_dense_int8),
]
with Int8Fallback():
test_dense(
target,
dev,
batch_size,
in_dim,
out_dim,
use_bias,
dense_ref_data,
in_dtype,
out_dtype,
implementations=implementations,
)
if __name__ == "__main__":
tvm.testing.main()
"""Test code for dense tensorcore operator""" |
import numpy as np |
import tvm
from tvm |
import topi |
import tvm.topi.testing
from tvm.topi.utils |
import get_const_tuple
from tvm |
import te
from tvm.contrib.pickle_memoize |
import memoize |
import tvm.testing
_dense_implement = {"gpu": [(topi.cuda.dense_tensorcore, topi.cuda.schedule_dense_tensorcore)]}
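# The helpers below pack int32 test data into the int4 storage layout used by the
# tensorcore kernels: eight 4-bit values per 32-bit word, most significant nibble first.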
def convert_int32_into_int4(a_int32):
"""convert int32 values into int4
Parameters
----------
a_int32 : int
Return
------
a_int4 : int
"""
K, L = a_int32.shape
assert L % 8 == 0
a_int4 = np.zeros(shape=(K, L // 8), dtype=np.int32)
for k in range(K):
for l in range(L // 8):
for m in range(min(8, L - l * 8)):
a_int4[k, l] = a_int4[k, l] | ((a_int32[k, l * 8 + m] & 0xF) << ((7 - m) * 4))
return a_int4
def convert_int32_into_int4_bias(a_int32):
"""convert int32 values into int4
Parameters
----------
a_int32 : int
Return
------
a_int4 : int
"""
(L,) = a_int32.shape
assert L % 8 == 0
a_int4 = np.zeros(shape=(L // 8,), dtype=np.int32)
for l in range(L // 8):
for m in range(min(8, L - l * 8)):
a_int4[l] = a_int4[l] | ((a_int32[l * 8 + m] & 0xF) << ((7 - m) * 4))
return a_int4
def verify_dense(batch, in_dim, out_dim, dtype, use_bias=True):
"""Dense tensorcore verify function"""
A = te.placeholder((batch, in_dim), name="A", dtype=dtype)
B = te.placeholder((out_dim, in_dim), name="B", dtype=dtype)
C = te.placeholder((out_dim,), name="C", dtype=dtype)
assert dtype in ["int4", "int8", "float16"]
out_dtype = "float32"
if dtype in ["int8", "int4"]:
out_dtype = "int32"
@memoize("topi.tests.test_topi_dense_tensorcore")
def get_ref_data():
if dtype == "int4":
a_np = np.random.randint(low=-8, high=7, size=(batch, in_dim))
b_np = np.random.randint(low=-8, high=7, size=(out_dim, in_dim))
c_np = np.random.randint(low=-8, high=7, size=(out_dim,))
elif dtype == "int8":
a_np = np.random.randint(low=-128, high=127, size=(batch, in_dim)).astype(dtype)
b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(dtype)
c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(dtype)
else:
a_np = np.random.uniform(size=(batch, in_dim)).astype(dtype)
b_np = np.random.uniform(size=(out_dim, in_dim)).astype(dtype)
c_np = np.random.uniform(size=(out_dim,)).astype(dtype)
d_np = tvm.topi.testing.dense(a_np, b_np, c_np, use_bias, True, out_dtype)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
if dtype == "int4":
a_np = convert_int32_into_int4(a_np)
b_np = convert_int32_into_int4(b_np)
c_np = convert_int32_into_int4_bias(c_np)
def check_device(device):
dev = tvm.device(device, 0)
print("Running on target: %s" % device)
for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement):
with tvm.target.Target(device):
D = fcompute(A, B, C if use_bias else None, out_dtype)
D = topi.nn.relu(D)
s = fschedule([D])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=out_dtype), dev)
f = tvm.build(s, [A, B, C, D], device, name="dense")
f(a, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-3)
check_device("cuda")
@tvm.testing.requires_tensorcore
def test_dense_tensorcore():
"""Test cases"""
for dtype in ["float16", "int8"]:
verify_dense(8, 16, 32, "float16", use_bias=True)
verify_dense(16, 32, 16, dtype, use_bias=True)
verify_dense(256, 1024, 1024, dtype, use_bias=True)
verify_dense(1000, 1024, 1024, dtype, use_bias=False)
verify_dense(256, 2048, 1000, dtype, use_bias=False)
verify_dense(16, 32, 16, "int4", use_bias=False)
verify_dense(256, 1024, 1024, "int4", use_bias=False)
verify_dense(1000, 1024, 1024, "int4", use_bias=False)
verify_dense(256, 2048, 1000, "int4", use_bias=False)
if __name__ == " |
__main__":
test_dense_tensorcore() |
"""Test code for depth to space""" |