"""Test code for clip operator"""
import numpy as np

import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def verify_clip(N, a_min, a_max, dtype):
A = te.placeholder((N, N), dtype=dtype, name="A")
B = topi.clip(A, a_min, a_max)
s = te.create_schedule([B.op])
@memoize("topi.tests.test_topi_clip")
def get_ref_data():
a_np = np.random.uniform(a_min * 2, a_max * 2, size=(N, N)).astype(dtype)
b_np = np.clip(a_np, a_min, a_max)
return a_np, b_np
a_np, b_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
f = tvm.build(s, [A, B], target, name="clip")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_clip():
verify_clip(1024, -127, 127, "float32")
verify_clip(1024, -127, 127, "int16")
verify_clip(1024, -127, 127, "int8")
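# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test): the property verify_clip
# checks is that topi.clip agrees with numpy's clip, which is itself the
# composition of an elementwise maximum and minimum. Pure NumPy, so it runs
# without building anything with TVM.
def _clip_reference_sketch():
    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.uniform(-254.0, 254.0, size=(8, 8)).astype("float32")
    a_min, a_max = -127.0, 127.0
    # np.clip(a, lo, hi) == np.minimum(np.maximum(a, lo), hi)
    assert np.array_equal(np.clip(a, a_min, a_max), np.minimum(np.maximum(a, a_min), a_max))
# ---------------------------------------------------------------------------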
if __name__ == "__main__":
test_clip()


"""Test code for 1D convolution."""
import numpy as np
import itertools

import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
_conv1d_ncw_implement = {
"generic": (topi.nn.conv1d_ncw, topi.generic.schedule_conv1d_ncw),
"cpu": (topi.nn.conv1d_ncw, topi.x86.schedule_conv1d_ncw),
"gpu": (topi.cuda.conv1d_ncw, topi.cuda.schedule_conv1d_ncw),
}
_conv1d_nwc_implement = {
"generic": (topi.nn.conv1d_nwc, topi.generic.schedule_conv1d_nwc),
"cpu": (topi.nn.conv1d_nwc, topi.x86.schedule_conv1d_nwc),
"gpu": (topi.cuda.conv1d_nwc, topi.cuda.schedule_conv1d_nwc),
}
_group_conv1d_implementations = {
"NCW": {
"generic": (topi.nn.group_conv1d_ncw, topi.generic.schedule_group_conv1d_ncw),
"cpu": (topi.nn.group_conv1d_ncw, topi.x86.schedule_group_conv1d_ncw),
"gpu": (topi.cuda.group_conv1d_ncw, topi.cuda.schedule_group_conv1d_ncw),
},
"NWC": {
"generic": (topi.nn.group_conv1d_nwc, topi.generic.schedule_group_conv1d_nwc),
"cpu": (topi.nn.group_conv1d_nwc, topi.x86.schedule_group_conv1d_nwc),
"gpu": (topi.cuda.group_conv1d_nwc, topi.cuda.schedule_group_conv1d_nwc),
},
}
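# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test): the tables above map a
# target kind to a (compute, schedule) pair, and tvm.topi.testing.dispatch
# selects the most specific entry for the current target, falling back to
# "generic". A minimal stand-alone version of that lookup, assuming the target
# is given as a plain key string, could look like this:
def _dispatch_sketch(target_key, table):
    # Prefer an exact key match (e.g. "cpu" or "gpu"); otherwise use "generic".
    return table.get(target_key, table["generic"])
# For example, _dispatch_sketch("gpu", _conv1d_ncw_implement) would return the
# (topi.cuda.conv1d_ncw, topi.cuda.schedule_conv1d_ncw) pair.
# ---------------------------------------------------------------------------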
def verify_conv1d(
batch,
in_channels,
in_width,
filters,
kernel_size=3,
stride=1,
dilation=1,
padding="VALID",
layout="NCW",
):
if layout == "NCW":
in_shape = [batch, in_channels, in_width]
kernel_shape = [filters, in_channels, kernel_size]
else:
in_shape = [batch, in_width, in_channels]
kernel_shape = [kernel_size, in_channels, filters]
dtype = "float32"
A = te.placeholder(in_shape, name="A", dtype=dtype)
W = te.placeholder(kernel_shape, name="W", dtype=dtype)
def get_ref_data(layout):
a_np = np.random.uniform(size=in_shape).astype(dtype)
w_np = np.random.uniform(size=kernel_shape).astype(dtype)
if layout == "NWC":
np_in = np.transpose(a_np, [0, 2, 1])
np_w = np.transpose(w_np, [2, 1, 0])
else:
np_in = a_np
np_w = w_np
b_np = tvm.topi.testing.conv1d_ncw_python(np_in, np_w, stride, padding, dilation)
if layout == "NWC":
b_np = np.transpose(b_np, [0, 2, 1])
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data(layout)
def check_target(target, dev):
if layout == "NCW":
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv1d_ncw_implement)
else:
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv1d_nwc_implement)
with tvm.target.Target(target):
B = fcompute(A, W, stride, padding, dilation, "float32")
s = fschedule([B])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
func = tvm.build(s, [A, W, B], target)
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv1d():
for layout in ["NCW", "NWC"]:
verify_conv1d(1, 1, 8, 1, 3, 1, 1, "VALID", layout)
verify_conv1d(1, 1, 8, 1, 3, 1, 1, "SAME", layout)
verify_conv1d(1, 16, 32, 16, 3, 1, 1, "SAME", layout)
verify_conv1d(1, 16, 32, 16, 3, 2, 1, "SAME", layout)
verify_conv1d(1, 16, 32, 16, 3, 1, 2, "SAME", layout)
verify_conv1d(8, 16, 32, 16, 3, 1, 1, "SAME", layout)
verify_conv1d(1, 16, 32, 16, 3, 1, 1, "SAME", layout)
verify_conv1d(1, 16, 32, 16, 2, 1, 1, "SAME", layout)
verify_conv1d(1, 16, 32, 16, 1, 1, 1, "SAME", layout)
verify_conv1d(1, 17, 12, 21, 3, 1, 1, "SAME", layout)
verify_conv1d(1, 5, 27, 18, 3, 1, 1, "VALID", layout)
layout = tvm.testing.parameter("NCW", "NWC")
padding = tvm.testing.parameter("SAME", "VALID")
dtype = tvm.testing.parameter("float32")
shape = tvm.testing.parameter(
[1, 4, 8, 4, 3, 1, 1, 4],
[1, 4, 8, 4, 3, 1, 1, 4],
[1, 16, 32, 16, 3, 1, 1, 4],
[1, 16, 32, 16, 3, 2, 1, 4],
[1, 16, 32, 16, 3, 1, 2, 4],
[8, 16, 32, 16, 3, 1, 1, 4],
[1, 16, 32, 16, 3, 1, 1, 4],
[1, 16, 32, 16, 2, 1, 1, 4],
[1, 16, 32, 16, 1, 1, 1, 4],
[1, 21, 12, 21, 3, 1, 1, 3],
[1, 20, 27, 20, 3, 1, 1, 5],
)
def test_group_conv1d(shape, layout, padding, target, dev, dtype):
batch, in_channels, in_width, filters, kernel_size, stride, dilation, groups = shape
if layout == "NCW":
in_shape = [batch, in_channels, in_width]
kernel_shape = [filters, in_channels // groups, kernel_size]
else:
in_shape = [batch, in_width, in_channels]
kernel_shape = [kernel_size, in_channels // groups, filters]
a_np = np.random.uniform(size=in_shape).astype(dtype)
w_np = np.random.uniform(size=kernel_shape).astype(dtype)
if layout == "NWC":
np_in = np.transpose(a_np, [0, 2, 1])
np_w = np.transpose(w_np, [2, 1, 0])
else:
np_in = a_np
np_w = w_np
b_np = tvm.topi.testing.group_conv1d_ncw_python(np_in, np_w, stride, padding, dilation, groups)
if layout == "NWC":
b_np = np.transpose(b_np, [0, 2, 1])
A = te.placeholder(in_shape, name="A", dtype=dtype)
W = te.placeholder(kernel_shape, name="W", dtype=dtype)
fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv1d_implementations[layout])
with tvm.target.Target(target):
B = fcompute(A, W, stride, padding, dilation, groups, "float32")
s = fschedule([B])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
print(tvm.lower(s, [A, W, B], target))
func = tvm.build(s, [A, W, B], target)
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
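# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test): both conv1d tests reuse
# the NCW reference implementation for NWC inputs by transposing into NCW and
# back, using the same axis permutation as get_ref_data above.
def _nwc_layout_sketch():
    import numpy as np

    a_nwc = np.zeros((2, 8, 3), dtype="float32")  # (batch, width, channel)
    a_ncw = np.transpose(a_nwc, [0, 2, 1])        # -> (batch, channel, width)
    assert a_ncw.shape == (2, 3, 8)
    # The same permutation applied again restores the NWC shape.
    assert np.transpose(a_ncw, [0, 2, 1]).shape == a_nwc.shape
# ---------------------------------------------------------------------------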
if __name__ == "__main__":
test_conv1d()


"""Test code for transposed convolution."""
import itertools
import os

import numpy as np

import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
_conv1d_transpose_ncw_implement = {
"generic": (topi.nn.conv1d_transpose_ncw, topi.generic.schedule_conv1d_transpose_ncw),
"gpu": (topi.cuda.conv1d_transpose_ncw, topi.cuda.schedule_conv1d_transpose_ncw),
}
(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
output_padding,
) = tvm.testing.parameters(
(1, 3, 224, 32, 5, 1, 0, (0,)),
(1, 3, 224, 32, 7, 1, 2, (0,)),
(1, 3, 224, 32, 5, 2, 1, (0,)),
(1, 3, 224, 32, 5, 2, 1, (1,)),
(1, 3, 224, 32, 5, 2, 0, (0,)),
(1, 32, 32, 128, 5, 1, 0, (0,)),
(1, 32, 32, 128, 5, 2, 1, (0,)),
(1, 1, 1024, 1, 512, 1, 256, (0,)),
(1, 1, 1024, 1, 512, 2, 256, (0,)),
(1, 1, 1024, 1, 512, 5, 256, (0,)),
(1, 1, 1024, 1, 512, 5, 256, (3,)),
(1, 2, 1024, 1, 128, 128, 0, (0,)),
(1, 1, 1024, 2, 128, 128, 0, (0,)),
(1, 1, 1024, 2, 2, 2, 0, (0,)),
(1, 1, 10, 1, 5, 1, (0, 3), (0,)),
(1, 1, 10, 1, 5, 1, (1, 3), (0,)),
(1, 1, 10, 1, 5, 1, (2, 3), (0,)),
(1, 257, 128, 1, 512, 128, 256, (0,)),
)
dtype = tvm.testing.parameter("float32")
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
dtype, batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding
):
dtype = "float32"
a_shape = (batch, in_channel, in_size)
w_shape = (in_channel, num_filter, kernel)
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = tvm.topi.testing.conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding)
c_np = np.maximum(b_np, 0)
return a_np, w_np, b_np, c_np
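# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test): for conv1d_transpose in
# NCW layout with a symmetric integer padding, the output width follows the
# usual transposed-convolution relation
#     out_w = (in_w - 1) * stride - 2 * padding + kernel + output_padding
# This helper only exists to sanity-check rows of the parameter table above.
def _conv1d_transpose_out_width(in_w, kernel, stride, padding, output_padding):
    return (in_w - 1) * stride - 2 * padding + kernel + output_padding
# Example: the first row (in_size=224, kernel=5, stride=1, padding=0,
# output_padding=(0,)) gives (224 - 1) * 1 - 0 + 5 + 0 = 228.
# ---------------------------------------------------------------------------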
@tvm.testing.known_failing_targets("vulkan")
def test_conv1d_transpose_ncw(
target,
dev,
ref_data,
dtype,
stride,
padding,
output_padding,
):
a_np, w_np, b_np, c_np = ref_data
A = te.placeholder(a_np.shape, name="A", dtype=dtype)
W = te.placeholder(w_np.shape, name="W", dtype=dtype)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv1d_transpose_ncw_implement)
B = fcompute(A, W, stride, padding, A.dtype, output_padding)
C = topi.nn.relu(B)
s1 = fschedule([B])
s2 = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func1 = tvm.build(s1, [A, W, B], target)
func2 = tvm.build(s2, [A, W, C], target)
func1(a, w, b)
func2(a, w, c)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()


"""Test for NCHW[x]c convolution"""
import numpy as np

import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
def _transform_data(data, bn):
# Re-layout NCHW -> NCHW[x]c: split C into (C // bn, bn) blocks and move the block last.
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
# Re-layout OIHW -> OIHW[x]i[x]o with input- and output-channel blocking.
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(kernel, (out_channel // oc_bn, oc_bn, in_channel // ic_bn, ic_bn, kh, kw))
kernel = np.transpose(kernel, (0, 2, 4, 5, 3, 1))
return kernel
def _transform_bias(bias, bn):
# Re-layout (num_filter, 1, 1) -> (num_filter // bn, 1, 1, bn).
num_filter, h, w = bias.shape
bias = np.reshape(bias, (num_filter // bn, bn, h, w))
bias = np.transpose(bias, (0, 2, 3, 1))
return bias
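# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test): _transform_data splits
# the channel axis of an NCHW tensor into (channel // bn, bn) blocks and moves
# the block axis last, which is the NCHW[x]c layout consumed by the x86
# schedule. A tiny round-trip check, assuming bn divides the channel count:
def _nchwc_layout_sketch():
    import numpy as np

    data = np.arange(1 * 8 * 2 * 2, dtype="float32").reshape(1, 8, 2, 2)  # NCHW, C=8
    blocked = _transform_data(data, 4)
    assert blocked.shape == (1, 2, 2, 2, 4)  # (N, C//bn, H, W, bn)
    # Undoing the blocking recovers the original NCHW tensor.
    restored = np.transpose(blocked, (0, 1, 4, 2, 3)).reshape(data.shape)
    assert np.array_equal(restored, data)
# ---------------------------------------------------------------------------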
def verify_conv2d_NCHWc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
dtype="float32",
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
in_height = in_width = in_size
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum)
)
oc_block = 1
for bn in range(16, 0, -1):
if num_filter % bn == 0:
oc_block = bn
break
ic_block = 1
for bn in range(oc_block, 0, -1):
if in_channel % bn == 0:
ic_block = bn
break
A = te.placeholder((batch, in_channel // ic_block, in_height, in_width, ic_block), name="A")
W = te.placeholder(
(num_filter // oc_block, in_channel // ic_block, kernel, kernel, ic_block, oc_block),
name="W",
)
bias = te.placeholder((num_filter // oc_block, 1, 1, oc_block), name="bias")
@memoize("topi.tests.test_topi_conv2d_NCHWc.verify_conv2d_NCHWc")
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
w_np = np.random.uniform(size=(num_filter, in_channel, kernel, kernel)).astype(dtype)
b_np = np.random.uniform(size=(num_filter, 1, 1)).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
if add_bias:
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return (
_transform_data(a_np, ic_block),
_transform_kernel(w_np, ic_block, oc_block),
_transform_bias(b_np, oc_block),
_transform_data(c_np, oc_block),
)
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
C = topi.x86.conv2d_NCHWc(
A,
W,
(stride, stride),
padding,
(dilation, dilation),
"NCHW%dc" % ic_block,
"NCHW%dc" % oc_block,
dtype,
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.x86.schedule_conv2d_NCHWc([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)
for device in ["llvm"]:
with autotvm.tophub.context(device):
check_device(device)
def test_conv2d_NCHWc():
verify_conv2d_NCHWc(1, 3, 224, 64, 7, 2, 3)
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc(1, 64, 56, 64, 1, 1, 0)
verify_conv2d_NCHWc(1, 64, 56, 128, 3, 2, 1)
verify_conv2d_NCHWc(1, 64, 56, 128, 1, 2, 0)
verify_conv2d_NCHWc(1, 128, 28, 128, 3, 1, 1)
verify_conv2d_NCHWc(1, 128, 28, 256, 3, 2, 1)
verify_conv2d_NCHWc(1, 128, 28, 256, 1, 2, 0)
verify_conv2d_NCHWc(1, 256, 14, 256, 3, 1, 1)
verify_conv2d_NCHWc(1, 256, 14, 512, 3, 2, 1)
verify_conv2d_NCHWc(1, 256, 14, 512, 1, 2, 0)
verify_conv2d_NCHWc(1, 512, 7, 512, 3, 1, 1)
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, add_relu=True)
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, add_bias=True)
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, add_bias=True, add_relu=True)
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, dilation=2)
verify_conv2d_NCHWc(4, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc(9, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc(2, 2, 2, 2, 2, 2, 2)
verify_conv2d_NCHWc(3, 3, 3, 3, 3, 3, 3)
verify_conv2d_NCHWc(4, 4, 4, 4, 4, 4, 4)
verify_conv2d_NCHWc(5, 5, 5, 5, 5, 5, 5)
verify_conv2d_NCHWc(6, 6, 6, 6, 6, 6, 6)
verify_conv2d_NCHWc(1, 3, 299, 32, 3, 2, 0)
verify_conv2d_NCHWc(1, 32, 149, 32, 3, 1, 0)
verify_conv2d_NCHWc(1, 32, 147, 64, 3, 1, 1)
verify_conv2d_NCHWc(1, 64, 73, 80, 1, 1, 0)
verify_conv2d_NCHWc(1, 80, 73, 192, 3, 1, 0)
verify_conv2d_NCHWc(1, 192, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc(1, 192, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc(1, 48, 35, 64, 5, 1, 2)
verify_conv2d_NCHWc(1, 64, 35, 96, 3, 1, 1)
verify_conv2d_NCHWc(1, 96, 35, 96, 3, 1, 1)
verify_conv2d_NCHWc(1, 192, 35, 32, 1, 1, 0)
verify_conv2d_NCHWc(1, 256, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc(1, 256, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc(1, 288, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc(1, 288, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc(1, 288, 35, 384, 3, 2, 0)
verify_conv2d_NCHWc(1, 96, 35, 96, 3, 2, 0)
verify_conv2d_NCHWc(1, 768, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 768, 17, 128, 1, 1, 0)
verify_conv2d_NCHWc(1, 128, 17, 128, 1, 1, 0)
verify_conv2d_NCHWc(1, 128, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc(1, 128, 17, 128, 7, 1, 3)
verify_conv2d_NCHWc(1, 128, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 768, 17, 160, 1, 1, 0)
verify_conv2d_NCHWc(1, 160, 17, 160, 1, 1, 0)
verify_conv2d_NCHWc(1, 160, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc(1, 160, 17, 160, 7, 1, 3)
verify_conv2d_NCHWc(1, 160, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 192, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 192, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc(1, 192, 17, 320, 3, 2, 0)
verify_conv2d_NCHWc(1, 192, 17, 192, 3, 2, 0)
verify_conv2d_NCHWc(1, 1280, 8, 320, 1, 1, 0)
verify_conv2d_NCHWc(1, 1280, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc(1, 384, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc(1, 384, 8, 384, 3, 1, 1)
verify_conv2d_NCHWc(1, 1280, 8, 448, 1, 1, 0)
verify_conv2d_NCHWc(1, 448, 8, 384, 3, 1, 1)
verify_conv2d_NCHWc(1, 1280, 8, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 2048, 8, 320, 1, 1, 0)
verify_conv2d_NCHWc(1, 2048, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc(1, 2048, 8, 448, 1, 1, 0)
verify_conv2d_NCHWc(1, 2048, 8, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 1024, 19, 84, 3, 1, 1)
verify_conv2d_NCHWc(1, 2048, 10, 126, 3, 1, 1)
verify_conv2d_NCHWc(1, 512, 5, 126, 3, 1, 1)
verify_conv2d_NCHWc(1, 256, 3, 126, 3, 1, 1)
verify_conv2d_NCHWc(1, 32, 17, 64, 7, 2, (0, 0, 1, 1))
verify_conv2d_NCHWc(1, 32, 35, 128, 3, 1, (3, 3, 2, 2))
verify_conv2d_NCHWc(1, 32, 35, 32, 1, 1, (1, 2, 2, 1))
verify_conv2d_NCHWc(1, 32, 17, 192, 1, 1, (1, 2))
verify_conv2d_NCHWc(1, 32, 8, 32, 3, 1, (3, 1))
verify_conv2d_NCHWc(1, 128, 8, 384, 3, 1, (0, 2))
verify_conv2d_NCHWc(1, 32, 8, 32, 1, 1, "VALID")
verify_conv2d_NCHWc(1, 388, 8, 32, 3, 1, "VALID")
verify_conv2d_NCHWc(1, 512, 19, 32, 1, 1, "SAME")
verify_conv2d_NCHWc(1, 32, 10, 32, 2, 1, "SAME")
verify_conv2d_NCHWc(1, 32, 8, 32, 3, 1, (1, 2, 2, 1), add_relu=True)
verify_conv2d_NCHWc(1, 32, 8, 32, 5, 2, (1, 3), add_bias=True)
verify_conv2d_NCHWc(1, 32, 8, 32, 3, 1, "VALID", add_bias=True, add_relu=True)
verify_conv2d_NCHWc(1, 32, 8, 32, 24, 1, "SAME", add_bias=True, add_relu=True)
if __name__ == "__main__":
test_conv2d_NCHWc()


"""Example code to do convolution."""
import os

import numpy as np

import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
_conv2d_hwcn_implement = {
"generic": (topi.nn.conv2d_hwcn, topi.generic.schedule_conv2d_hwcn),
"gpu": (topi.cuda.conv2d_hwcn, topi.cuda.schedule_conv2d_hwcn),
"opencl": (topi.cuda.conv2d_hwcn, topi.cuda.schedule_conv2d_hwcn),
}
def verify_conv2d_hwcn(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1):
in_height = in_width = in_size
A = te.placeholder((in_height, in_width, in_channel, batch), name="A")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
B = te.placeholder((1, num_filter, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
b_shape = get_const_tuple(B.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_hwcn.verify_hwcn")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=b_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
c1_np = tvm.topi.testing.conv2d_hwcn_python(a_np, dw_np, stride, padding)
c2_np = c1_np + b_np
c3_np = np.maximum(c2_np, 0)
return a_np, w_np, b_np, c1_np, c2_np, c3_np
a_np, w_np, b_np, c1_np, c2_np, c3_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv2d_hwcn_implement)
t_conv = fcompute(A, W, stride, padding, dilation)
t_bias = topi.add(t_conv, B)
t_relu = topi.nn.relu(t_bias)
s1 = fschedule([t_conv])
s2 = fschedule([t_bias])
s3 = fschedule([t_relu])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
conv_out = tvm.nd.array(np.zeros(get_const_tuple(t_conv.shape), dtype=t_conv.dtype), dev)
bias_out = tvm.nd.array(np.zeros(get_const_tuple(t_bias.shape), dtype=t_bias.dtype), dev)
relu_out = tvm.nd.array(np.zeros(get_const_tuple(t_relu.shape), dtype=t_relu.dtype), dev)
func1 = tvm.build(s1, [A, W, t_conv], target)
func2 = tvm.build(s2, [A, W, B, t_bias], target)
func3 = tvm.build(s3, [A, W, B, t_relu], target)
func1(a, w, conv_out)
func2(a, w, b, bias_out)
func3(a, w, b, relu_out)
tvm.testing.assert_allclose(conv_out.numpy(), c1_np, rtol=1e-5)
tvm.testing.assert_allclose(bias_out.numpy(), c2_np, rtol=1e-5)
tvm.testing.assert_allclose(relu_out.numpy(), c3_np, rtol=1e-5)
for target in ["cuda", "opencl", "metal", "rocm", "vulkan", "nvptx"]:
check_target(target)
@tvm.testing.requires_gpu
def test_conv2d_hwcn():
verify_conv2d_hwcn(1, 256, 32, 128, 3, 1, "SAME")
verify_conv2d_hwcn(1, 256, 32, 256, 3, 1, "SAME")
verify_conv2d_hwcn(4, 128, 16, 128, 5, 2, "SAME")
verify_conv2d_hwcn(4, 128, 16, 256, 5, 2, "SAME")
verify_conv2d_hwcn(1, 256, 32, 128, 3, 1, "VALID")
verify_conv2d_hwcn(1, 256, 32, 256, 3, 1, "VALID")
verify_conv2d_hwcn(4, 128, 16, 128, 5, 2, "VALID")
verify_conv2d_hwcn(4, 128, 16, 256, 5, 2, "VALID")
verify_conv2d_hwcn(1, 256, 32, 256, 3, 1, "SAME", dilation=2)
verify_conv2d_hwcn(1, 256, 32, 128, 3, (1, 1), "SAME")
verify_conv2d_hwcn(1, 256, 32, 256, 3, (1, 1), "SAME")
verify_conv2d_hwcn(4, 128, 16, 128, 5, (2, 2), "SAME")
verify_conv2d_hwcn(4, 128, 16, 256, 5, (2, 2), "SAME")
verify_conv2d_hwcn(1, 256, 32, 128, 3, (1, 1), "VALID")
verify_conv2d_hwcn(1, 256, 32, 256, 3, (1, 1), "VALID")
verify_conv2d_hwcn(4, 128, 16, 128, 5, (2, 2), "VALID")
verify_conv2d_hwcn(4, 128, 16, 256, 5, (2, 2), "VALID")
if __name__ == "__main__":
test_conv2d_hwcn()


"""Example code to do convolution."""
import os

import numpy as np

import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, autotvm, topi, relay
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
_conv2d_hwnc_tensorcore_implement = {
"cuda": (topi.cuda.conv2d_hwnc_tensorcore, topi.cuda.schedule_conv2d_hwnc_tensorcore)
}
def verify_conv2d_hwnc(
batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1, dtype="int4"
):
"""Test the conv2d with tensorcore for hwnc layout"""
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
assert dtype in ["int4", "int8"]
in_height = in_width = in_size
A = te.placeholder((in_height, in_width, batch, in_channel), name="A", dtype=dtype)
W = te.placeholder((kernel, kernel, num_filter, in_channel), name="W", dtype=dtype)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
@memoize("topi.tests.test_topi_conv2d_hwnc.verify_conv2d_hwnc")
def get_ref_data():
if dtype == "int4":
a_np = np.random.randint(low=-8, high=7, size=a_shape).transpose((2, 0, 1, 3))
w_np = np.random.randint(low=-8, high=7, size=w_shape)
dw_np = topi.testing.dilate_python(
w_np.transpose((0, 1, 3, 2)), (1, 1, dilation, dilation)
)
elif dtype == "int8":
a_np = (
np.random.randint(low=-128, high=127, size=a_shape)
.transpose((2, 0, 1, 3))
.astype(dtype)
)
w_np = np.random.randint(low=-128, high=127, size=w_shape).astype(dtype)
dw_np = topi.testing.dilate_python(
w_np.transpose((0, 1, 3, 2)), (1, 1, dilation, dilation)
)
c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
return a_np, w_np, c_np
def convert_int32_into_int4(a_int32):
"""Pack groups of eight int4 values (stored as int32) into single int32 words.
Parameters
----------
a_int32 : numpy.ndarray
Input array whose int4 values are stored one per int32 element.
Returns
-------
a_int4 : numpy.ndarray
Packed array whose last dimension is eight times smaller.
"""
I, J, K, L = a_int32.shape
a_int4 = np.zeros(shape=(I, J, K, L // 8), dtype="int32")
for i in range(I):
for j in range(J):
for k in range(K):
for l in range(L // 8):
for m in range(min(8, L - l * 8)):
a_int4[i, j, k, l] = a_int4[i, j, k, l] | (
(a_int32[i, j, k, l * 8 + m] & 0xF) << ((7 - m) * 4)
)
return a_int4
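# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test): convert_int32_into_int4
# packs eight 4-bit values into one 32-bit word, with the first value of each
# group of eight placed in the most significant nibble. A small check of that
# bit layout for a single group (assuming the reconstruction above):
def _int4_packing_sketch():
    values = np.array([[[[1, 2, 3, 4, 5, 6, 7, -1]]]], dtype=np.int32)  # shape (1, 1, 1, 8)
    packed = convert_int32_into_int4(values)
    assert packed.shape == (1, 1, 1, 1)
    # Nibbles, most significant first: 1, 2, 3, 4, 5, 6, 7, 0xF (-1 as two's complement).
    assert packed[0, 0, 0, 0] == 0x1234567F
# ---------------------------------------------------------------------------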
a_np, w_np, c_np = get_ref_data()
if dtype == "int4":
a_np = convert_int32_into_int4(a_np)
w_np = convert_int32_into_int4(w_np)
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
if not nvcc.have_tensorcore(dev.compute_version):
print("skip because gpu does not support Tensor Cores")
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = topi.testing.dispatch(target, _conv2d_hwnc_tensorcore_implement)
C = fcompute(A, W, stride, padding, dilation, dtype, "int32")
s = fschedule([C])
a = tvm.nd.array(a_np.transpose((1, 2, 0, 3)), dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
rtol = 1e-3
tvm.testing.assert_allclose(c.numpy().transpose((2, 0, 1, 3)), c_np, rtol=rtol)
check_target("cuda")
def verify_feature_length():
np.random.seed(123)
target = "cuda"
ctx = tvm.device(target)
batch_size = 32
input_shape = (32, 512, 7, 7)
kernel_shape = (512, 512, 3, 3)
def get_mod():
x = relay.var("x", relay.TensorType(input_shape, "float32"))
y = relay.var("y", relay.TensorType(kernel_shape, "float32"))
f = relay.Function(
[x, y], relay.nn.conv2d(x, y, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3])
)
mod = tvm.IRModule()
mod["main"] = f
mod = relay.transform.InferType()(mod)
return mod, {}
mod, params = get_mod()
layout_config = relay.transform.LayoutConfig()
desired_layouts = {"nn.conv2d": ["HWNC", "default"]}
with layout_config:
seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
mod = relay.transform.recast(mod, "int4", "int32")
tasks = autotvm.task.extract_from_program(
mod, target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
)
assert len(tasks) == 1
task = tasks[0]
space = task.config_space
idx1 = space.get_rand_index()
idx2 = space.get_rand_index()
cfg = space.get(idx1)
sch, arg_bufs = task.instantiate(cfg)
fea1 = autotvm.feature.get_itervar_feature_flatten(sch, arg_bufs, take_log=True)
cfg = space.get(idx2)
sch, arg_bufs = task.instantiate(cfg)
fea2 = autotvm.feature.get_itervar_feature_flatten(sch, arg_bufs, take_log=True)
assert len(fea1) == len(fea2)
@tvm.testing.requires_tensorcore
def test_conv2d_hwnc_tensorcore():
"""Test the conv2d with tensorcore for hwnc layout"""
verify_conv2d_hwnc(8, 64, 56, 64, 3, 1, 1, dtype="int8")
verify_conv2d_hwnc(8, 64, 56, 64, 1, 1, 0, dtype="int4")
verify_conv2d_hwnc(8, 64, 56, 128, 3, 2, 1)
verify_conv2d_hwnc(8, 64, 56, 64, 1, 2, 0)
verify_conv2d_hwnc(8, 128, 28, 128, 3, 1, 1)
verify_conv2d_hwnc(8, 128, 28, 256, 3, 2, 1)
verify_conv2d_hwnc(8, 128, 28, 256, 1, 2, 0)
verify_conv2d_hwnc(8, 256, 14, 256, 3, 1, 1)
verify_conv2d_hwnc(8, 256, 14, 512, 3, 2, 1)
verify_conv2d_hwnc(8, 256, 14, 512, 1, 2, 0)
verify_conv2d_hwnc(8, 512, 9, 512, 3, 1, 1)
verify_feature_length()
if __name__ == "__main__":
test_conv2d_hwnc_tensorcore()


"""Example code to do convolution."""
import numpy as np

import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.conv2d import _get_workload
from tvm.topi.generic.conv2d import fallback_schedule_cpu_common_int8
from common import Int8Fallback
import tvm.testing
import pytest
import platform
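# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the quantized conv2d
# variants below take int8 inputs but accumulate into int32 (or uint32),
# because a single output element sums many int8 products and would overflow
# 8-bit storage almost immediately. Pure NumPy:
def _int8_accumulation_sketch():
    a = np.full(64, 127, dtype=np.int8)
    w = np.full(64, 127, dtype=np.int8)
    acc = np.dot(a.astype(np.int32), w.astype(np.int32))  # accumulate in int32
    assert acc == 64 * 127 * 127  # 1032256, far outside the int8 range
# ---------------------------------------------------------------------------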
def compile_conv2d_NHWC_gemm_int8_arm(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="int8")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W", dtype="int8")
bias = te.placeholder((num_filter,), name="bias", dtype="int8")
dtype = "int32"
devices = [
(
"llvm --device arm_cpu --mtriple aarch64-linux-gnu",
topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved,
topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved,
),
(
"llvm --device arm_cpu --mtriple aarch64-linux-gnu -mattr=+v8.2a,+dotprod",
topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved,
topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved,
),
(
"llvm --device arm_cpu --mtriple aarch64-linux-gnu -mattr=+v8.2a,+dotprod",
topi.arm_cpu.compute_conv2d_NHWC_quantized_native,
topi.arm_cpu.schedule_conv2d_NHWC_quantized_native,
),
]
for device_tuple in devices:
target = device_tuple[0]
compute = device_tuple[1]
schedule = device_tuple[2]
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Compiling on arm AArch64 target: %s" % target)
with tvm.target.Target(target) as tvm_target:
assert tvm_target.features.is_aarch64, "AArch64 target not recognized"
C = compute(A, W, (stride, stride), padding, (dilation, dilation), dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = schedule([C])
if add_bias:
tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%dnnn_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
def verify_conv2d_NHWC_gemm_int8(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="int8")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W", dtype="int8")
bias = te.placeholder((num_filter,), name="bias", dtype="int8")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_int8.verify_conv2d_nchw")
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding).astype(dtype)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
C = topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved(
A, W, (stride, stride), padding, (dilation, dilation), dtype
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
check_target("llvm")
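# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the NHWC GEMM test
# above uses a 1-D (num_filter,) bias, which broadcasts over the trailing
# channel axis of an NHWC output, while the NCHW tests below use a
# (num_filter, 1, 1) bias so it broadcasts over H and W instead. The same
# broadcasting rule in NumPy:
def _bias_broadcast_sketch():
    nhwc = np.zeros((1, 4, 4, 8), dtype=np.int32)
    nchw = np.zeros((1, 8, 4, 4), dtype=np.int32)
    bias = np.arange(8, dtype=np.int32)
    assert (nhwc + bias).shape == (1, 4, 4, 8)                   # (num_filter,) on NHWC
    assert (nchw + bias.reshape(8, 1, 1)).shape == (1, 8, 4, 4)  # (num_filter, 1, 1) on NCHW
# ---------------------------------------------------------------------------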
def verify_conv2d_NCHWc_int8(
in_dtype,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype=in_dtype)
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W", dtype=in_dtype)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
out_dtype = "int32" if in_dtype == "int8" else "uint32"
lo = -128 if in_dtype == "int8" else 0
hi = 127 if in_dtype == "int8" else 255
def check_target(target, compute, schedule, oc_block_factor, build_only):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return
bias = te.placeholder(
(num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype=out_dtype
)
bias_shape = get_const_tuple(bias.shape)
@memoize("topi.tests.test_topi_conv2d_int8.verify_conv2d_nchw")
def get_ref_data():
a_np = np.random.randint(low=lo, high=hi, size=a_shape).astype(out_dtype)
w_np = np.random.randint(low=lo, high=hi, size=w_shape).astype(out_dtype)
b_np = np.random.uniform(size=bias_shape).astype(out_dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(
out_dtype
)
_, _, out_height, out_width = c_np.shape
c_np = c_np.reshape(
(batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(out_dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
with tvm.target.Target(target):
C = compute(
A,
W,
(stride, stride),
padding,
(dilation, dilation),
"NCHW",
"NCHW",
out_dtype,
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = schedule([C])
a = tvm.nd.array(a_np.astype(dtype), dev)
w = tvm.nd.array(w_np.astype(dtype), dev)
b = tvm.nd.array(b_np.astype(out_dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
compile_args = [A, W, bias, C]
run_args = [a, w, b, c]
else:
compile_args = [A, W, C]
run_args = [a, w, c]
func = tvm.build(
s,
compile_args,
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
if build_only:
return
print("Running on target: %s" % target)
func(*run_args)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
targets = [
(
"cuda",
lambda a, w, s, p, d, l, ol, o: topi.cuda.conv2d_NCHWc_int8(a, w, s, p, d, l, o),
topi.cuda.schedule_conv2d_NCHWc_int8,
4,
False,
),
]
build_only_aarch64 = platform.machine() != "aarch64"
targets.append(
(
"llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon,+v8.2a,+dotprod",
topi.arm_cpu.conv2d_NCHWc_int8,
topi.arm_cpu.schedule_conv2d_NCHWc_int8,
8,
build_only_aarch64,
)
)
if in_dtype == "int8":
targets += [
(
"llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon",
topi.arm_cpu.conv2d_NCHWc_int8,
topi.arm_cpu.schedule_conv2d_NCHWc_int8,
8,
build_only_aarch64,
),
(
"rocm -mattr=+dotprod",
lambda a, w, s, p, d, l, ol, o: topi.cuda.conv2d_NCHWc_int8(a, w, s, p, d, l, o),
topi.cuda.schedule_conv2d_NCHWc_int8,
4,
False,
),
]
for target, compute, schedule, oc_block_factor, build_only in targets:
check_target(target, compute, schedule, oc_block_factor, build_only)
def verify_conv2d_nchw_int8(
in_dtype,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype=in_dtype)
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W", dtype=in_dtype)
bias = te.placeholder((num_filter, 1, 1), name="bias", dtype=in_dtype)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_int8.verify_conv2d_nchw")
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def verify_workload_padding():
_, _, out_height, out_width = get_const_tuple(c_np.shape)
wkl = _get_workload(A, W, (stride, stride), padding, dilation, dtype)
int32_lanes, num_int8_elements = num_filter, in_channel
cfg = autotvm.get_config()
fallback_schedule_cpu_common_int8(cfg, wkl, int32_lanes, num_int8_elements)
ow_tile = np.prod(cfg["tile_ow"].size)
tvm.testing.assert_allclose(ow_tile, out_width)
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
C = topi.cuda.conv2d_nchw_int8(
A, W, (stride, stride), padding, (dilation, dilation), dtype
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.cuda.schedule_conv2d_nchw_int8([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
verify_workload_padding()
for target in ["cuda"]:
check_target(target)
@pytest.mark.parametrize("in_dtype", ["int8", "uint8"])
def test_conv2d_nchw(in_dtype):
with Int8Fallback():
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 128, 3, 2, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 128, 1, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 28, 128, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 28, 256, 3, 2, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 28, 256, 1, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 14, 256, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 14, 512, 3, 2, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 14, 512, 1, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 512, 7, 512, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, add_relu=True)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, add_bias=True)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, add_bias=True, add_relu=True)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, dilation=2)
verify_conv2d_NCHWc_int8(in_dtype, 4, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 9, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 4, 4, 4, 8, 4, 4, 4)
verify_conv2d_NCHWc_int8(in_dtype, 1, 32, 149, 32, 3, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 32, 147, 64, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 73, 80, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 80, 73, 192, 3, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 48, 35, 64, 5, 1, 2)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 35, 96, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 96, 35, 96, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 35, 32, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 288, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 288, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 288, 35, 384, 3, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 96, 35, 96, 3, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 768, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 768, 17, 128, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 17, 128, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 17, 128, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 768, 17, 160, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 160, 17, 160, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 160, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 160, 17, 160, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 160, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 17, 320, 3, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 17, 192, 3, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1280, 8, 320, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1280, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 384, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 384, 8, 384, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1280, 8, 448, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 448, 8, 384, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1280, 8, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 2048, 8, 320, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 2048, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 2048, 8, 448, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 2048, 8, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1024, 19, 88, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 7, 32, 149, 32, 3, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 8, 32, 149, 32, 3, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 32, 32, 149, 32, 3, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 32, 35, 64, 7, 2, (0, 0, 1, 1))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 128, 3, 1, (3, 3, 2, 2))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 1, 1, (1, 2, 2, 1))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 17, 192, 1, 1, (1, 2))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 3, 1, (3, 1))
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 8, 384, 3, 1, (0, 2))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 1, 1, "VALID")
verify_conv2d_NCHWc_int8(in_dtype, 1, 392, 8, 64, 3, 1, "VALID")
verify_conv2d_NCHWc_int8(in_dtype, 1, 512, 19, 64, 1, 1, "SAME")
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 16, 32, 2, 1, "SAME")
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 3, 1, (1, 2, 2, 1), add_relu=True)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 5, 2, (1, 3), add_bias=True)
verify_conv2d_NCHWc_int8(
in_dtype, 1, 64, 56, 64, 3, 1, "VALID", add_bias=True, add_relu=True
)
verify_conv2d_NCHWc_int8(
in_dtype, 1, 64, 56, 64, 24, 1, "SAME", add_bias=True, add_relu=True
)
verify_conv2d_nchw_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1)
verify_conv2d_nchw_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, add_relu=True)
verify_conv2d_nchw_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, dilation=2)
verify_conv2d_nchw_int8(in_dtype, 9, 64, 56, 64, 3, 1, 1)
verify_conv2d_nchw_int8(in_dtype, 4, 4, 4, 4, 4, 4, 4)
verify_conv2d_nchw_int8(in_dtype, 1, 32, 149, 32, 3, 1, 0)
verify_conv2d_nchw_int8(in_dtype, 7, 32, 149, 32, 3, 1, 0)
verify_conv2d_nchw_int8(in_dtype, 1, 32, 35, 64, 7, 2, (0, 0, 1, 1))
verify_conv2d_nchw_int8(in_dtype, 1, 32, 35, 64, 7, 2, (0, 0, 2, 2))
def test_conv2d_nhwc():
with Int8Fallback():
verify_conv2d_NHWC_gemm_int8(1, 3, 299, 32, 3, 2, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 32, 149, 32, 3, 1, "SAME", dilation=2)
verify_conv2d_NHWC_gemm_int8(4, 32, 147, 64, 3, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 64, 73, 80, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 80, 73, 192, 3, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 192, 35, 48, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 192, 35, 64, 1, 1, "VALID")
verify_conv2d_NHWC_gemm_int8(1, 192, 35, 32, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 48, 35, 64, 5, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 96, 35, 96, 3, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 256, 35, 48, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 256, 35, 64, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 288, 35, 64, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 288, 35, 48, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 96, 35, 96, 3, 2, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 128, 17, 192, 7, 1, "SAME", dilation=2)
verify_conv2d_NHWC_gemm_int8(1, 160, 17, 160, 7, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 160, 17, 192, 1, 1, "VALID")
verify_conv2d_NHWC_gemm_int8(1, 192, 17, 192, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 768, 5, 128, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 192, 17, 320, 3, 2, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 192, 17, 192, 3, 2, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 1280, 8, 192, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 1280, 8, 384, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 1280, 8, 320, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 1280, 8, 448, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 384, 8, 384, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 384, 8, 384, 3, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 448, 8, 384, 3, 1, "VALID")
verify_conv2d_NHWC_gemm_int8(1, 2048, 8, 320, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 2048, 8, 448, 1, 1, "SAME", add_bias=True, add_relu=True)
verify_conv2d_NHWC_gemm_int8(1, 2048, 8, 192, 1, 1, "SAME", add_bias=True)
compile_conv2d_NHWC_gemm_int8_arm(1, 3, 299, 32, 3, 2, "SAME")
if __name__ == "__main__":
tvm.testing.main()