def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(dev):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
C = topi.x86.conv2d_NCHWc(
A,
W,
(stride, stride),
(padding, padding),
(dilation, dilation),
"NCHW%dc" % ic_block,
"NCHW%dc" % oc_block,
dtype,
)
s = topi.x86.schedule_conv2d_NCHWc([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)
for device in ["llvm -mcpu=skylake-avx512"]:
with autotvm.tophub.context(device):
check_device(device)
autotvm.GLOBAL_SCOPE.silent = False
@tvm.testing.uses_gpu
@pytest.mark.skip
def test_conv2d_NCHWc():
verify_group_conv2d_NCHWc_int8(1, 256, 32, 224, 64, 7, 2, 3)
if __name__ == "__main__":
pass
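# Illustrative sketch (an assumption, not part of the test above): the
# "NCHW%dc" strings passed to conv2d_NCHWc denote a blocked layout where the
# channel axis is split into (c // block) outer chunks of `block` inner
# channels. A minimal numpy packing helper, with hypothetical names:
import numpy as np

def pack_nchwc(x, block):
    """Repack NCHW -> NCHW{block}c, i.e. shape (n, c // block, h, w, block)."""
    n, c, h, w = x.shape
    assert c % block == 0
    return x.reshape(n, c // block, block, h, w).transpose(0, 1, 3, 4, 2)

print(pack_nchwc(np.zeros((1, 8, 4, 4)), 4).shape)  # (1, 2, 4, 4, 4)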
"""Example code to do group transpose convolution.""" |
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
_group_conv2d_nchw_implement = {
"generic": (
topi.nn.group_conv2d_transpose_nchw,
topi.generic.schedule_group_conv2d_transpose_nchw,
),
"cuda": (topi.cuda.conv2d_transpose_nchw, topi.cuda.schedule_conv2d_transpose_nchw),
}
def verify_group_conv2d_transpose_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
output_padding,
groups,
):
print(
"Workload: (%d, %d, %s, %d, %s, %s, %s, %s, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding, groups)
)
in_height, in_width = in_size
kernel_height, kernel_width = kernel
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
W = te.placeholder((in_channel, num_filter // groups, kernel_height, kernel_width), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d_transpose.verify_group_conv2d_transpose_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np = tvm.topi.testing.conv2d_transpose_nchw_python(
a_np, w_np, stride, padding, output_padding, groups
).astype(dtype)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv2d_nchw_implement)
C = fcompute(A, W, stride, padding, dtype, output_padding, groups)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func = tvm.build(
s,
[A, W, C],
target,
name="group_conv2d_transpose_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size[0],
in_size[1],
num_filter,
kernel[0],
kernel[1],
stride[0],
stride[1],
padding[0],
padding[1],
padding[2],
padding[3],
output_padding[0],
output_padding[1],
groups,
),
)
func(a, w, c)
c = c.numpy()
for measurement, reference in zip(c, c_np):
tvm.testing.assert_allclose(measurement, reference, rtol=1e-5)
for target in ["llvm", "cuda"]:
check_target(target)
@tvm.testing.uses_gpu
def test_group_conv2d_transpose_nchw():
verify_group_conv2d_transpose_nchw(1, 4, (32, 32), 4, (5, 5), (1, 1), (0, 0, 0, 0), (0, 0), 2)
verify_group_conv2d_transpose_nchw(1, 9, (32, 32), 9, (5, 5), (1, 1), (0, 0, 0, 0), (0, 0), 3)
verify_group_conv2d_transpose_nchw(1, 4, (32, 32), 16, (5, 5), (2, 2), (1, 1, 1, 1), (0, 0), 4)
verify_group_conv2d_transpose_nchw(
1, 32, (8192, 1), 8, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0), 2
)
verify_group_conv2d_transpose_nchw(
1, 512, (8, 1), 256, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0), 16
)
verify_group_conv2d_transpose_nchw(
1, 512, (8, 1), 256, (31, 1), (2, 1), (14, 0, 15, 0), (1, 0), 16
)
verify_group_conv2d_transpose_nchw(
1, 64, (64, 64), 64, (4, 4), (1, 1), (0, 0, 0, 0), (0, 0), 64
)
verify_group_conv2d_transpose_nchw(
1, 128, (32, 32), 128, (4, 4), (1, 1), (0, 0, 0, 0), (0, 0), 128
)
verify_group_conv2d_transpose_nchw(
1, 256, (16, 16), 256, (4, 4), (1, 1), (0, 0, 0, 0), (0, 0), 256
)
verify_group_conv2d_transpose_nchw(1, 1, (224, 224), 1, (1, 1), (1, 1), (0, 0, 0, 0), (0, 0), 1)
verify_group_conv2d_transpose_nchw(
1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0), 1
)
verify_group_conv2d_transpose_nchw(
1, 3, (224, 224), 32, (3, 3), (3, 3), (0, 0, 0, 0), (0, 0), 1
)
verify_group_conv2d_transpose_nchw(
1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0), 1
)
verify_group_conv2d_transpose_nchw(
1, 3, (224, 224), 32, (3, 3), (2, 2), (1, 1, 1, 1), (0, 0), 1
)
verify_group_conv2d_transpose_nchw(1, 48, (64, 64), 12, (4, 4), (2, 2), (1, 1, 1, 1), (0, 0), 1)
if __name__ == "__main__":
test_group_conv2d_transpose_nchw()
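# A small reference helper (illustrative assumption, not used by the test):
# grouped conv2d_transpose follows the standard spatial-size rule
#   out = (in - 1) * stride - (pad_begin + pad_end) + kernel + output_padding
def transpose_out_size(in_size, kernel, stride, pad_begin, pad_end, out_pad):
    return (in_size - 1) * stride - (pad_begin + pad_end) + kernel + out_pad

# The (8192, 1) workload above (kernel 31, stride 2, pads 14/15) doubles the
# input length:
print(transpose_out_size(8192, 31, 2, 14, 15, 0))  # 16384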
"""Test code for bilinear scale """ |
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
def verify_resize2d(
batch,
in_channel,
in_height,
in_width,
out_height,
out_width,
layout="NCHW",
coord_trans="align_corners",
method="linear",
):
if layout == "NCHW":
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype="float32")
dtype = A.dtype
out_shape = (batch, in_channel, out_height, out_width)
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
elif layout == "NHWC":
A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="float32")
dtype = A.dtype
out_shape = (batch, out_height, out_width, in_channel)
a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
else:
raise NotImplementedError("Layout not supported {} ".format(layout))
B = topi.image.resize2d(
A,
[0.0] * 4,
(out_height, out_width),
layout=layout,
coordinate_transformation_mode=coord_trans,
method=method,
)
scale_h = out_height / in_height
scale_w = out_width / in_width
b_np = tvm.topi.testing.resize2d_python(a_np, (scale_h, scale_w), layout, method, coord_trans)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_resize2d():
verify_resize2d(4, 16, 32, 32, 50, 50, "NCHW")
verify_resize2d(6, 32, 64, 64, 20, 20, "NCHW")
verify_resize2d(4, 16, 32, 32, 50, 50, "NHWC")
verify_resize2d(6, 32, 64, 64, 20, 20, "NHWC")
for layout in ["NCHW", "NHWC"]:
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "asymmetric", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 64, 50, layout, "asymmetric", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 50, 96, layout, "asymmetric", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 96, 96, layout, "asymmetric", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "align_corners", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "half_pixel", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "asymmetric", method="linear")
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "half_pixel", method="linear")
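# Hedged sketch (assumption) of how the coordinate_transformation_mode strings
# exercised above typically map an output index back to a source coordinate;
# tvm.topi.testing.resize2d_python remains the reference implementation.
def src_coord(x_out, in_size, out_size, mode):
    scale = out_size / in_size
    if mode == "asymmetric":
        return x_out / scale
    if mode == "half_pixel":
        return (x_out + 0.5) / scale - 0.5
    if mode == "align_corners":
        return x_out * (in_size - 1) / (out_size - 1) if out_size > 1 else 0.0
    raise ValueError(mode)

print(src_coord(10, 32, 50, "half_pixel"))  # ~6.22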
def verify_resize3d(
batch,
in_channel,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
layout="NCDHW",
coordinate_transformation_mode="asymmetric",
method="linear",
):
if layout == "NCDHW":
A = te.placeholder(
(batch, in_channel, in_depth, in_height, in_width), name="A", dtype="float32"
)
dtype = A.dtype
out_shape = (batch, in_channel, out_depth, out_height, out_width)
a_np = np.random.uniform(size=(batch, in_channel, in_depth, in_height, in_width)).astype(
dtype
)
elif layout == "NDHWC":
A = te.placeholder(
(batch, in_depth, in_height, in_width, in_channel), name="A", dtype="float32"
)
dtype = A.dtype
out_shape = (batch, out_depth, out_height, out_width, in_channel)
a_np = np.random.uniform(size=(batch, in_depth, in_height, in_width, in_channel)).astype(
dtype
)
else:
raise NotImplementedError("Layout not supported {} ".format(layout))
B = topi.image.resize3d(
A,
[0.0] * 6,
(out_depth, out_height, out_width),
layout=layout,
coordinate_transformation_mode=coordinate_transformation_mode,
method=method,
)
scale_d = out_depth / in_depth
scale_h = out_height / in_height
scale_w = out_width / in_width
b_np = tvm.topi.testing.resize3d_python(
a_np, (scale_d, scale_h, scale_w), layout, method, coordinate_transformation_mode
)
def check_target(target, dev):
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_resize3d():
for method in ["nearest_neighbor", "linear"]:
for coord_trans in ["asymmetric", "align_corners", "half_pixel"]:
for layout in ["NCDHW", "NDHWC"]:
verify_resize3d(3, 16, 32, 32, 32, 10, 10, 10, layout, coord_trans, method)
@tvm.testing.uses_gpu
def test_crop_and_resize():
def verify_crop_and_resize(
image_shape,
np_boxes,
np_box_indices,
np_crop_size,
layout="NHWC",
method="bilinear",
extrapolation_value=0.0,
):
images = te.placeholder(image_shape, name="images", dtype="float32")
np_images = np.random.uniform(size=image_shape).astype("float32")
boxes = te.placeholder(np_boxes.shape, name="boxes", dtype="float32")
box_ind = te.placeholder(np_box_indices.shape, name="box_ind", dtype="int32")
batch = len(np_box_indices)
target_height, target_width = np_crop_size[0], np_crop_size[1]
if layout == "NHWC":
channel = image_shape[3]
out_shape = (batch, target_height, target_width, channel)
elif layout == "NCHW":
channel = image_shape[1]
out_shape = (batch, channel, target_height, target_width)
else:
raise NotImplementedError("Layout {} is not supported.".format(layout))
out = topi.image.crop_and_resize(
images,
boxes,
box_ind,
np_crop_size,
layout=layout,
method=method,
extrapolation_value=extrapolation_value,
)
baseline_np = tvm.topi.testing.crop_and_resize_python(
np_images, np_boxes, np_box_indices, np_crop_size, layout, method, extrapolation_value
)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out)
tvm_images = tvm.nd.array(np_images, dev)
tvm_boxes = tvm.nd.array(np_boxes, dev)
tvm_indices = tvm.nd.array(np_box_indices, dev)
tvm_out = tvm.nd.array(np.zeros(out_shape, dtype="float32"), dev)
f = tvm.build(s, [images, boxes, box_ind, out], target, name="crop_and_resize")
f(tvm_images, tvm_boxes, tvm_indices, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), baseline_np, rtol=1e-3, atol=1e-3)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
boxes_1 = np.array([[0.2, 0.3, 0.7, 0.9]], dtype="float32")
boxes_2 = np.array([[0.2, 0.3, 0.7, 0.9], [0, 0.1, 0.8, 1]], dtype="float32")
indices_1 = np.array([0], dtype="int32")
indices_2 = np.array([1, 0], dtype="int32")
size_1 = (7, 11)
size_2 = (90, 60)
verify_crop_and_resize((1, 255, 255, 3), boxes_1, indices_1, size_1, layout="NHWC")
verify_crop_and_resize(
(10, 224, 224, 5), boxes_2, indices_2, size_2, extrapolation_value=0.3, layout="NHWC"
)
verify_crop_and_resize((1, 100, 100, 3), boxes_1, indices_1, size_1, method="nearest_neighbor")
verify_crop_and_resize((1, 3, 224, 224), boxes_1, indices_1, size_1, layout="NCHW")
@tvm.testing.uses_gpu
def test_affine_grid():
def verify_affine_grid(num_batch, target_shape):
dtype = "float32"
data_shape = (num_batch, 2, 3)
data = te.placeholder(data_shape, dtype=dtype)
out = topi.image.affine_grid(data, target_shape)
@memoize("topi.tests.test_affine_grid.verify_affine_grid")
def get_ref_data():
data_np = np.random.uniform(size=data_shape).astype(dtype)
out_np = tvm.topi.testing.affine_grid_python(data_np, target_shape)
return data_np, out_np
data_np, out_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out)
tvm_data = tvm.nd.array(data_np, dev)
tvm_out = tvm.nd.empty(out_np.shape, dtype, dev)
f = tvm.build(s, [data, out], target)
f(tvm_data, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), out_np, rtol=1e-5, atol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
verify_affine_grid(1, (16, 32))
verify_affine_grid(4, (16, 32))
@tvm.testing.uses_gpu
def test_grid_sample():
def verify_grid_sample(
data_shape,
grid_shape,
method="bilinear",
layout="NCHW",
padding_mode="zeros",
align_corners=True,
):
dtype = "float32"
data = te.placeholder(data_shape, dtype=dtype)
grid = te.placeholder(grid_shape, dtype=dtype)
out = topi.image.grid_sample(data, grid, method, layout, padding_mode, align_corners)
@memoize("topi.tests.test_grid_sample.verify_grid_sample")
def get_ref_data():
data_np = np.random.uniform(size=data_shape).astype(dtype)
grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
out_np = tvm.topi.testing.grid_sample_python(
data_np, grid_np, method, layout, padding_mode, align_corners
)
return data_np, grid_np, out_np
data_np, grid_np, out_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out)
tvm_data = tvm.nd.array(data_np, dev)
tvm_grid = tvm.nd.array(grid_np, dev)
tvm_out = tvm.nd.empty(out_np.shape, dtype, dev)
f = tvm.build(s, [data, grid, out], target)
f(tvm_data, tvm_grid, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), out_np, rtol=1e-5, atol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
methods = ["nearest", "bilinear", "bicubic"]
padding_modes = ["zeros", "border", "reflection"]
align_corners = [True, False]
data_2D_shape = (4, 4, 8, 8)
grid_2D_shape = (4, 2, 16, 16)
layout_2D = "NCHW"
data_3D_shape = (4, 4, 4, 4, 4)
grid_3D_shape = (4, 3, 8, 8, 8)
layout_3D = "NCDHW"
for _method in methods:
for _padding in padding_modes:
for _align in align_corners:
verify_grid_sample(
data_2D_shape, grid_2D_shape, _method, layout_2D, _padding, _align
)
if _method != "bicubic":
verify_grid_sample(
data_3D_shape, grid_3D_shape, _method, layout_3D, _padding, _align
)
if __name__ == "__main__":
test_resize2d()
test_resize3d()
test_crop_and_resize()
test_affine_grid()
test_grid_sample()
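# Illustrative sketch (assumption, not TVM API): grid_sample reads grid values
# in [-1, 1] and maps them to input pixel coordinates; the align_corners flag
# changes where the endpoints land.
def grid_to_pixel(g, size, align_corners):
    if align_corners:
        return (g + 1) / 2 * (size - 1)   # -1 -> 0, +1 -> size - 1
    return ((g + 1) * size - 1) / 2       # -1 -> -0.5, +1 -> size - 0.5

print(grid_to_pixel(1.0, 8, True), grid_to_pixel(1.0, 8, False))  # 7.0 7.5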
"""Test code for layer_norm.""" |
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm.topi.testing
import tvm.testing
_layer_norm_schedule = {
"generic": topi.generic.schedule_injective,
}
@tvm.testing.parametrize_targets("llvm")
@pytest.mark.parametrize("shape,axis", [([4, 16], (1,)), ([4, 16, 16], (1, 2))])
def test_layer_norm(target, dev, shape, axis, epsilon=1e-5, dtype="float32", rtol=1e-5, atol=1e-5):
data = te.placeholder(shape, dtype=dtype, name="data")
scale_shape = [shape[dim] for dim in axis]
gamma = te.placeholder(scale_shape, dtype=dtype, name="gamma")
beta = te.placeholder(scale_shape, dtype=dtype, name="beta")
B = topi.nn.layer_norm(data, gamma, beta, axis, epsilon)
data_np = np.random.uniform(size=shape).astype(dtype)
gamma_np = np.random.uniform(size=scale_shape).astype(dtype)
beta_np = np.random.uniform(size=scale_shape).astype(dtype)
b_np = tvm.topi.testing.layer_norm_python(data_np, gamma_np, beta_np, axis, epsilon)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _layer_norm_schedule)
s = s_func([B])
data_tvm = tvm.nd.array(data_np, dev)
gamma_tvm = tvm.nd.array(gamma_np, dev)
beta_tvm = tvm.nd.array(beta_np, dev)
b_tvm = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
f = tvm.build(s, [data, gamma, beta, B], target)
f(data_tvm, gamma_tvm, beta_tvm, b_tvm)
tvm.testing.assert_allclose(b_tvm.numpy(), b_np, rtol=rtol, atol=atol)
if __name__ == "__main__":
tvm.testing.main()
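# A minimal numpy reference for the layer_norm checked above (a sketch of the
# usual definition; tvm.topi.testing.layer_norm_python is authoritative):
import numpy as np

def layer_norm_ref(x, gamma, beta, axis, epsilon=1e-5):
    mean = x.mean(axis=axis, keepdims=True)
    var = x.var(axis=axis, keepdims=True)
    return (x - mean) / np.sqrt(var + epsilon) * gamma + beta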
"""Test code for loss operators.""" |
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
import tvm.testing
prediction_shape, reduction, ignore_index, dtype = tvm.testing.parameters(
((10, 5), "mean", -100, "float32"),
((10, 5, 2, 2), "mean", -100, "float32"),
((10, 5), "sum", -100, "float32"),
((10, 5), "none", -100, "float32"),
((10, 5), "mean", 3, "float32"),
((10, 5), "mean", -100, "float64"),
)
def test_nll_loss(target, dev, prediction_shape, reduction, ignore_index, dtype):
C = prediction_shape[1]
target_shape = prediction_shape[:1] + prediction_shape[2:]
predictions = te.placeholder(shape=prediction_shape, name="predictions", dtype=dtype)
targets = te.placeholder(shape=target_shape, name="targets", dtype="int32")
weights = te.placeholder(shape=(C,), name="weights", dtype=dtype)
nll_loss_result = topi.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
with tvm.target.Target(target):
fschedule = tvm.topi.testing.get_reduce_schedule(target)
s = fschedule([nll_loss_result])
fn = tvm.build(s, [predictions, targets, weights, nll_loss_result], target, name="nll_loss")
predictions_npy = np.random.uniform(size=prediction_shape).astype(dtype)
targets_npy = np.random.randint(0, C, target_shape).astype("int32")
weights_npy = np.random.uniform(size=(C,)).astype(dtype)
out_npy = tvm.topi.testing.nll_loss(
predictions_npy, targets_npy, weights_npy, reduction, ignore_index
)
predictions_nd = tvm.nd.array(predictions_npy, dev)
targets_nd = tvm.nd.array(targets_npy, dev)
weights_nd = tvm.nd.array(weights_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(nll_loss_result.dtype), dev)
fn(predictions_nd, targets_nd, weights_nd, out_nd)
out_topi = out_nd.numpy()
tvm.testing.assert_allclose(out_topi, out_npy, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
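# Hedged numpy sketch of the loss checked above, for the "mean" reduction
# (tvm.topi.testing.nll_loss remains the reference implementation):
import numpy as np

def nll_loss_mean_ref(pred, tgt, weight, ignore_index=-100):
    # pred: (N, C, ...) log-probabilities, tgt: (N, ...) class indices.
    pred2 = pred.reshape(pred.shape[0], pred.shape[1], -1)
    tgt2 = tgt.reshape(tgt.shape[0], -1)
    num, den = 0.0, 0.0
    for n in range(tgt2.shape[0]):
        for i in range(tgt2.shape[1]):
            c = tgt2[n, i]
            if c == ignore_index:
                continue
            num += -pred2[n, c, i] * weight[c]
            den += weight[c]
    return num / den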
"""Test code for local response normalization""" |
import numpy as np
import tvm
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm.topi.testing
import tvm.testing
_lrn_schedule = {
"generic": topi.generic.schedule_lrn,
"gpu": topi.cuda.schedule_lrn,
"opencl": topi.cuda.schedule_lrn,
"metal": topi.cuda.schedule_lrn,
"rocm": topi.cuda.schedule_lrn,
"vulkan": topi.cuda.schedule_lrn,
"nvptx": topi.cuda.schedule_lrn,
}
def verify_lrn(shape, size, axis, bias, alpha, beta, dtype="float32", rtol=1e-5, atol=1e-5):
A = te.placeholder(shape, dtype=dtype, name="A")
B = topi.nn.lrn(A, size, axis, alpha, beta, bias)
a_np = np.random.uniform(size=shape).astype(dtype)
b_np = tvm.topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
def check_device(device):
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
s_func = tvm.topi.testing.dispatch(device, _lrn_schedule)
s = s_func([B])
dev = tvm.device(device, 0)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=rtol, atol=atol)
for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan", "nvptx"]:
check_device(device)
@tvm.testing.uses_gpu
def test_lrn():
verify_lrn((1, 3, 5, 5), 3, 1, 1.0, 1.0, 0.5)
verify_lrn((1, 3, 5, 5), 3, 3, 1.0, 1.0, 0.5)
verify_lrn((1, 3, 20, 20), 3, 1, 2.0, 1.0, 0.75)
verify_lrn((1, 3, 5, 5), 3, 3, 1.0, 1.0, 0.5, dtype="float16", rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
test_lrn()
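# Sketch (assumption) of the cross-channel LRN rule the test exercises:
#   b[c] = a[c] / (bias + alpha / size * sum(a[window]^2)) ** beta
# over a window of `size` channels centred on c; lrn_python is authoritative.
import numpy as np

def lrn_ref_1d(a, size, bias, alpha, beta):
    half, out = size // 2, np.empty_like(a)
    for c in range(a.size):
        win = a[max(0, c - half) : min(a.size, c + half + 1)]
        out[c] = a[c] / (bias + alpha / size * np.sum(win**2)) ** beta
    return out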
"""Test code for LSTM.""" |
import numpy as np
import tvm
from tvm import te, topi
import tvm.testing
import tvm.topi.testing
def verify_lstm(
target,
dev,
seq_len,
batch_size,
in_dim,
hidden_dim,
proj_dim=0,
bias=True,
zero_init=True,
peephole=False,
reverse=False,
weight_layout="IFGO",
):
out_dim = proj_dim if proj_dim > 0 else hidden_dim
def rand(*shape):
sqrt_k = np.sqrt(1 / hidden_dim)
return np.random.uniform(-sqrt_k, sqrt_k, size=shape).astype("float32")
def get_ref_data():
Xs = np.random.normal(size=(seq_len, batch_size, in_dim)).astype("float32")
Wi = rand(4 * hidden_dim, in_dim)
Wh = rand(4 * hidden_dim, out_dim)
Bi = None
Bh = None
h0 = None
c0 = None
proj = None
p_i = None
p_f = None
p_o = None
if bias:
Bi = rand(4 * hidden_dim)
Bh = rand(4 * hidden_dim)
if not zero_init:
h0 = np.random.normal(size=(batch_size, out_dim)).astype("float32")
c0 = np.random.normal(size=(batch_size, hidden_dim)).astype("float32")
if proj_dim > 0:
proj = rand(proj_dim, hidden_dim)
if peephole:
p_i, p_f, p_o = [rand(batch_size, hidden_dim) for _ in range(3)]
hs, cs = tvm.topi.testing.lstm_python(
Xs,
Wi,
Wh,
Bi=Bi,
Bh=Bh,
h_init=h0,
c_init=c0,
proj=proj,
p_i=p_i,
p_f=p_f,
p_o=p_o,
reverse=reverse,
weight_layout=weight_layout,
)
return [Xs, Wi, Wh, Bi, Bh, h0, c0, proj, p_i, p_f, p_o], [hs, cs]
args_np, (hs_np, cs_np) = get_ref_data()
args = [te.placeholder(a.shape, "float32") if a is not None else a for a in args_np]
real_args = [a for a in args if a is not None]
hs, cs = topi.nn.lstm(*args, reverse=reverse, weight_layout=weight_layout)
with tvm.target.Target(target):
sch = topi.generic.schedule_lstm([hs, cs])
func = tvm.build(sch, real_args + [hs, cs], target=target)
args_nd = [tvm.nd.array(a, dev) for a in args_np if a is not None]
hs_nd = tvm.nd.array(np.zeros((seq_len, batch_size, out_dim), "float32"), dev)
cs_nd = tvm.nd.array(np.zeros((seq_len, batch_size, hidden_dim), "float32"), dev)
func(*args_nd, hs_nd, cs_nd)
tvm.testing.assert_allclose(hs_nd.numpy(), hs_np, rtol=1e-4)
tvm.testing.assert_allclose(cs_nd.numpy(), cs_np, rtol=1e-4)
def test_lstm():
verify_lstm(
"llvm",
tvm.cpu(0),
1,
1,
1,
1,
0,
True,
True,
False,
False,
"IFGO",
)
verify_lstm(
"llvm",
tvm.cpu(0),
8,
4,
8,
16,
0,
True,
False,
False,
False,
"IFGO",
)
def test_lstm_proj():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 8, True, True, False, False, "IFGO")
def test_lstm_peephole():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 0, True, True, True, False, "IFGO")
def test_lstm_reverse():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 0, True, True, False, True, "IFGO")
def test_lstm_weight_layout_iofg():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 0, True, True, False, False, "IOFG")
def test_lstm_assorted():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 16, True, False, True, True, "OIGF")
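# Compact numpy sketch (assumption) of one LSTM step under the IFGO weight
# layout used above; topi.nn.lstm / lstm_python are the real references.
import numpy as np

def lstm_step_ifgo(x, h, c, Wi, Wh, Bi, Bh):
    gates = Wi @ x + Wh @ h + Bi + Bh            # shape (4 * hidden_dim,)
    i, f, g, o = np.split(gates, 4)              # input, forget, gate, output
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    c_new = sigmoid(f) * c + sigmoid(i) * np.tanh(g)
    h_new = sigmoid(o) * np.tanh(c_new)
    return h_new, c_new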
import sys
import numpy as np
import pytest
import scipy
from scipy import special
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi import utils
def test_util():
x = tvm.tir.const(100, "int32")
assert utils.get_const_int(x) == 100
assert utils.get_const_tuple((x, x)) == (100, 100)
ewise_operations = {
"floor": {"topi": topi.floor, "ref": np.floor, "input_range": (-100, 100)},
"ceil": {"topi": topi.ceil, "ref": np.ceil, "input_range": (-100, 100)},
"sign": {
"topi": topi.sign,
"ref": np.sign,
"input_range": (-100, 100),
"skip_name_check": True,
},
"trunc": {"topi": topi.trunc, "ref": np.trunc, "input_range": (-100, 100)},
"fabs": {"topi": topi.abs, "ref": np.fabs, "input_range": (-100, 100)},
"round": {"topi": topi.round, "ref": np.round, "input_range": (-100, 100), "check_round": True},
"exp": {"topi": topi.exp, "ref": np.exp, "input_range": (-1, 1)},
"tanh": {
"topi": topi.tanh,
"ref": np.tanh,
"input_range": (-10, 10),
"shape": (128, 128),
"dtype": ["float32", "float64"],
},
"sigmoid": {
"topi": topi.sigmoid,
"ref": lambda x: 1 / (1 + np.exp(-x)),
"input_range": (-1, 1),
},
"log": {"topi": topi.log, "ref": np.log, "input_range": (0, 100)},
"sqrt": {"topi": topi.sqrt, "ref": np.sqrt, "input_range": (0, 100)},
"rsqrt": {
"topi": topi.rsqrt,
"ref": lambda x: np.ones_like(x) / np.sqrt(x),
"input_range": (0, 100),
"skip_name_check": True,
},
"cos": {"topi": topi.cos, "ref": np.cos, "input_range": (-2.0 * np.pi, 2.0 * np.pi)},
"tan": {
"topi": topi.tan,
"ref": np.tan,
"input_range": (-2.0 * np.pi, 2.0 * np.pi),
"dtypes": ["float32", "float64"],
},
"sin": {"topi": topi.sin, "ref": np.sin, "input_range": (-2.0 * np.pi, 2.0 * np.pi)},
"erf": {"topi": topi.erf, "ref": scipy.special.erf, "input_range": (-0.1, 0.1)},
"isnan": {
"topi": topi.isnan,
"ref": np.isnan,
"input_range": (-1, 1),
"replace_with_nan": True,
},
"isfinite": {
"topi": topi.isfinite,
"ref": np.isfinite,
"input_range": (0, 1),
"shape": (8, 8),
"skip_name_check": True,
"replace_with_nan": True,
"replace_with_inf": True,
"dtypes": ["float32", "float64", "int32", "int16"],
},
"isinf": {
"topi": topi.isinf,
"ref": np.isinf,
"input_range": (0, 1),
"shape": (8, 8),
"skip_name_check": True,
"replace_with_nan": True,
"replace_with_inf": True,
"dtypes": ["float32", "float64", "int32", "int16"],
},
"fast_exp": {
"topi": topi.fast_exp,
"ref": np.exp,
"skip_name_check": True,
"input_range": (-88, 88),
"step": 0.01,
},
"fast_erf": {
"topi": topi.fast_erf,
"ref": scipy.special.erf,
"skip_name_check": True,
"input_range": (-10, 10),
"step": 0.01,
"dtypes": ["float32", "float16"],
"cast_output": True,
"tolerance": [1e-5, 1e-1],
},
"fast_tanh": {
"topi": topi.fast_tanh,
"ref": np.tanh,
"skip_name_check": True,
"input_range": (-10, 10),
"step": 0.01,
},
}
topi_name, dtype, tolerance = tvm.testing.parameters(
*[
(name, dtype, config.get("tolerance", [1e-5] * len(dtype))[i])
for name, config in ewise_operations.items()
for i, dtype in enumerate(config.get("dtypes", ["float32"]))
]
)
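# Sanity sketch (not part of the test): the comprehension above pairs each
# dtype with its positional tolerance, defaulting to 1e-5; e.g. "fast_erf"
# expands to ("fast_erf", "float32", 1e-5) and ("fast_erf", "float16", 1e-1).
assert list(
    zip(ewise_operations["fast_erf"]["dtypes"], ewise_operations["fast_erf"]["tolerance"])
) == [("float32", 1e-5), ("float16", 1e-1)]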
@tvm.testing.fixture(cache_return_value=True)
def ewise_ref_data(topi_name, dtype):
config = ewise_operations[topi_name]
input_range = config["input_range"]
shape = config.get("shape", (20, 3))
a_np = np.random.uniform(*input_range, size=shape).astype(dtype)
if dtype.startswith("float"):
if config.get("replace_with_nan", False):
a_np.ravel()[np.random.choice(a_np.size, int(a_np.size * 0.5), replace=False)] = np.nan
if config.get("replace_with_inf", False):
a_np.ravel()[np.random.choice(a_np.size, int(a_np.size * 0.5), replace=False)] = np.inf
if topi_name == "round":
a_np += ((np.abs(np.fmod(a_np, 1)) - 0.5) < 1e-6) * 1e-4
b_np = config["ref"](a_np)
if config.get("cast_output", False):
b_np = b_np.astype(dtype)
return a_np, b_np
def test_ewise(target, dev, topi_name, dtype, tolerance, ewise_ref_data):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and topi_name in ["tan", "erf", "isnan", "isfinite", "isinf"]:
pytest.xfail(f"Vulkan runtime doesn't support {topi_name} yet")
topi_op = ewise_operations[topi_name]["topi"]
skip_name_check = ewise_operations[topi_name].get("skip_name_check", False)
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), dtype=dtype, name="A")
B = topi_op(A)
assert tuple(B.shape) == tuple(A.shape)
if not skip_name_check:
assert B.op.body[0].op.name == "tir." + topi_name
a_np, b_np = ewise_ref_data
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name=topi_name)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros_like(b_np), dev)
foo(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=tolerance, atol=tolerance)
from_dtype, to_dtype = tvm.testing.parameters(
("int32", "float32"),
("int32", "float64"),
("int32", "bool"),
("float32", "int32"),
("float32", "float64"),
("float32", "bool"),
("bool", "float32"),
("bool", "int32"),
)
@tvm.testing.fixture(cache_return_value=True)
def cast_ref_data(from_dtype, to_dtype):
shape = (5, 4)
input_range = (-100, 100)
if from_dtype == "bool":
a_np = np.random.choice([True, False], size=shape)
else:
a_np = np.random.uniform(*input_range, size=shape).astype(from_dtype)
if to_dtype == "bool":
a_np = a_np - a_np[2, 3]
b_np = a_np.astype(to_dtype)
return a_np, b_np
def test_cast(target, dev, cast_ref_data, from_dtype, to_dtype):
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), dtype=from_dtype, name="A")
B = topi.cast(A, to_dtype)
a_np, b_np = cast_ref_data
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.empty(b_np.shape, dtype=to_dtype, device=dev)
foo(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np)
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
def with_tvm(lam, *args):
"""Take numpy arrays as args, convert them to TVM tensors and call `lam`.
Result of lambda is converted back to numpy array and returned.
"""
dev = tvm.cpu(0)
pls = []
vals_nd = []
for i, arg in enumerate(args):
pls.append(te.placeholder(arg.shape, name="pl" + str(i)))
vals_nd.append(tvm.nd.array(arg, dev))
out = lam(*pls)
out_nd = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out.dtype), dev)
s = te.create_schedule([out.op])
m = tvm.build(s, pls + [out], "llvm")
m(*(vals_nd + [out_nd]))
return out_nd.numpy()
def verify_nn_matmul(sa, sb, transp_a, transp_b):
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.matmul(np.transpose(a) if transp_a else a, np.transpose(b) if transp_b else b)
c2 = with_tvm(
lambda A, B: topi.nn.matmul(A, B, transpose_a=transp_a, transpose_b=transp_b),
a,
b,
)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
def test_nn_matmul():
verify_nn_matmul((1, 1), (1, 1), False, False)
verify_nn_matmul((1, 1), (1, 1), True, True)
verify_nn_matmul((2, 2), (2, 2), False, False)
verify_nn_matmul((2, 2), (2, 2), True, True)
verify_nn_matmul((2, 3), (3, 5), False, False)
verify_nn_matmul((5, 3), (3, 2), False, False)
verify_nn_matmul((3, 5), (3, 2), True, False)
verify_nn_matmul((3, 5), (2, 3), True, True)
verify_nn_matmul((3, 5), (3, 2), True, False)
verify_nn_matmul((5, 3), (2, 3), False, True)
def verify_matmul(sa, sb, transp_a, transp_b):
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.matmul(np.transpose(a) if transp_a else a, np.transpose(b) if transp_b else b)
c2 = with_tvm(lambda A, B: topi.matmul(A, B, transp_a, transp_b), a, b)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
def test_matmul():
verify_matmul((1, 1), (1, 1), False, False)
verify_matmul((1, 1), (1, 1), True, True)
verify_matmul((2, 2), (2, 2), False, False)
verify_matmul((2, 2), (2, 2), True, True)
verify_matmul((2, 3), (3, 5), False, False)
verify_matmul((5, 3), (3, 2), False, False)
verify_matmul((3, 5), (3, 2), True, False)
verify_matmul((3, 5), (2, 3), True, True)
def verify_tensordot(sa, sb, axes):
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.tensordot(a, b, axes)
c2 = with_tvm(lambda A, B: topi.tensordot(A, B, axes), a, b)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
def test_tensordot():
verify_tensordot((3), (3), 0)
verify_tensordot((2, 3), (3, 5), 1)
verify_tensordot((2, 2, 3), (2, 3, 5), 2)
verify_tensordot((2, 2, 3, 4), (2, 3, 4, 5), 3)
verify_tensordot((3, 2, 2), (2, 3, 5), (1, 0))
verify_tensordot((3, 2, 2), (2, 3, 5), ((1, 0), (0, 1)))
verify_tensordot((4, 3, 2, 2), (2, 4, 3, 5), ((1, 2, 0), (2, 0, 1)))
if __name__ == "__main__":
test_nn_matmul()
test_matmul()
test_tensordot()
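# Quick numpy illustration of the three `axes` forms exercised above: an int
# contracts that many trailing dims of `a` against leading dims of `b`, a pair
# names one axis from each operand, and a pair of tuples names several.
import numpy as np

_a, _b = np.ones((3, 2, 2)), np.ones((2, 3, 5))
print(np.tensordot(_a, _b, (1, 0)).shape)            # (3, 2, 3, 5)
print(np.tensordot(_a, _b, ((1, 0), (0, 1))).shape)  # (2, 5)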
"""Test code for pooling""" |
import math
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
_pool_schedule = {
"generic": topi.generic.schedule_pool,
"cpu": topi.x86.schedule_pool,
"gpu": topi.cuda.schedule_pool,
"hls": topi.hls.schedule_pool,
}
_adaptive_pool_schedule = {
"generic": topi.generic.schedule_adaptive_pool,
"cpu": topi.x86.schedule_adaptive_pool,
"gpu": topi.cuda.schedule_adaptive_pool,
"hls": topi.hls.schedule_adaptive_pool,
}
_pool_grad_schedule = {
"generic": topi.generic.schedule_pool_grad,
"gpu": topi.cuda.schedule_pool_grad,
}
def verify_pool_grad(
n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True, add_relu=False
):
"""verify function of pool_grad"""
iw = ih
kw = kh
sw = sh
pt, pl, pb, pr = padding
A = te.placeholder((n, ic, ih, iw), name="A")
B = topi.nn.pool2d(
A,
kernel=[kh, kw],
stride=[sh, sw],
dilation=[1, 1],
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout="NCHW",
count_include_pad=count_include_pad,
)
dtype = A.dtype
bshape = get_const_tuple(B.shape)
ashape = get_const_tuple(A.shape)
if ceil_mode:
assert bshape[2] == int(math.ceil(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.ceil(float(ashape[3] - kw + pl + pr) / sw) + 1)
else:
assert bshape[2] == int(math.floor(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.floor(float(ashape[3] - kw + pl + pr) / sw) + 1)
OutGrad = te.placeholder(bshape, name="OutGrad")
PoolGrad = topi.nn.pool_grad(
OutGrad,
A,
kernel=[kh, kw],
stride=[sh, sw],
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout="NCHW",
count_include_pad=count_include_pad,
)
if add_relu:
PoolGrad = topi.nn.relu(PoolGrad)
a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
out_grad_np = np.random.uniform(low=0.001, size=bshape).astype(dtype)
pool_grad_np = tvm.topi.testing.pool_grad_nchw(
a_np,
out_grad_np,
pool_size=(kh, kw),
strides=(sh, sw),
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
if add_relu:
pool_grad_np = np.maximum(pool_grad_np, 0.0)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _pool_grad_schedule)
s = s_func(PoolGrad)
a = tvm.nd.array(a_np, dev)
out_grad = tvm.nd.array(out_grad_np, dev)
pool_grad = tvm.nd.array(np.zeros(get_const_tuple(PoolGrad.shape), dtype=dtype), dev)
f = tvm.build(s, [A, OutGrad, PoolGrad], target)
f(a, out_grad, pool_grad)
tvm.testing.assert_allclose(pool_grad.numpy(), pool_grad_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_pool_grad():
"""test cases of pool_grad"""
verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], "avg", False, False)
verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], "avg", False, True)
verify_pool_grad(1, 256, 31, 3, 3, [1, 2, 1, 2], "avg", False, True)
verify_pool_grad(1, 256, 32, 2, 2, [1, 2, 1, 2], "avg", False, False)
verify_pool_grad(1, 256, 31, 4, 4, [2, 2, 2, 2], "avg", False, False)
verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], "avg", False, False)
verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], "max", False)
verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], "max", False)
verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], "max", True)
verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 0, 3], "avg", False, True)
verify_pool_grad(1, 256, 32, 2, 2, [0, 3, 2, 1], "avg", False, False)
verify_pool_grad(1, 256, 31, 3, 3, [1, 0, 3, 2], "max", False)
verify_pool_grad(1, 256, 31, 3, 3, [3, 2, 1, 0], "max", True)
verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], "max", False)
verify_pool_grad(1, 256, 32, 1, 2, [1, 1, 1, 1], "avg", False, False)
verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], "avg", False, False, add_relu=True)
verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], "max", False, add_relu=True)
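# Helper sketch mirroring the shape asserts in verify_pool_grad above
# (illustrative only): the pooled extent per spatial axis is
#   floor-or-ceil((in - kernel + pad_before + pad_after) / stride) + 1.
import math

def pooled_size(in_size, kernel, stride, pad_before, pad_after, ceil_mode):
    span = in_size - kernel + pad_before + pad_after
    return int((math.ceil if ceil_mode else math.floor)(span / stride) + 1)

print(pooled_size(32, 3, 2, 1, 1, False))  # 16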
def verify_global_pool(dshape, pool_type, layout="NCHW"):
"""verify function of global_pool"""
assert layout in ["NCHW", "NHWC"]
A = te.placeholder(shape=dshape, name="A")
B = topi.nn.global_pool(A, pool_type=pool_type, layout=layout)
B = topi.nn.relu(B)
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
axis = (layout.find("H"), layout.find("W"))
if pool_type == "avg":
b_np = np.mean(a_np, axis=axis, keepdims=True)
elif pool_type == "max":
b_np = np.max(a_np, axis=axis, keepdims=True)
b_np = np.maximum(b_np, 0.0)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _adaptive_pool_schedule)
if target == "cuda":
s = s_func(B, layout)
else:
s = s_func(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_global_pool():
"""test cases of global_pool"""
verify_global_pool((1, 1024, 7, 7), "avg")
verify_global_pool((4, 1024, 7, 7), "avg")
verify_global_pool((1, 1024, 7, 7), "max")
verify_global_pool((4, 1024, 7, 7), "max")
verify_global_pool((1, 7, 7, 1024), "avg", "NHWC")
verify_global_pool((4, 7, 7, 1024), "avg", "NHWC")
verify_global_pool((1, 7, 7, 1024), "max", "NHWC")
verify_global_pool((4, 7, 7, 1024), "max", "NHWC")
def verify_adaptive_pool(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
"""verify function of adaptive_pool"""
np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
oshape = np_out.shape
data = te.placeholder(dshape, name="data", dtype=dtype)
if len(out_size) == 2:
out = topi.nn.adaptive_pool(data, out_size, pool_type, layout)
else:
assert len(out_size) == 3
out = topi.nn.adaptive_pool3d(data, out_size, pool_type, layout)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _adaptive_pool_schedule)
if target == "cuda":
s = s_func(out, layout)
else:
s = s_func(out)
a = tvm.nd.array(np_data, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(oshape), dtype=out.dtype), dev)
f = tvm.build(s, [data, out], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), np_out, rtol=4e-5, atol=1e-6)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_adaptive_pool():
"""test cases of adaptive_pool"""
verify_adaptive_pool((1, 3, 224, 224), (1, 1), "max")
verify_adaptive_pool((1, 3, 224, 224), (1, 1), "avg")
verify_adaptive_pool((1, 14, 56, 78), (34, 13), "max")
verify_adaptive_pool((1, 5, 46, 97), (4, 96), "avg")
verify_adaptive_pool((1, 224, 224, 3), (1, 1), "max", layout="NHWC")
verify_adaptive_pool((1, 5, 46, 97), (4, 96), "avg", layout="NHWC")
verify_adaptive_pool((1, 16, 32, 32, 32), (1, 1, 1), "max", layout="NCDHW")
verify_adaptive_pool((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NCDHW")
verify_adaptive_pool((1, 16, 32, 32, 32), (2, 2, 2), "avg", layout="NCDHW")
verify_adaptive_pool((1, 16, 64, 32, 32), (7, 8, 9), "avg", layout="NCDHW")
verify_adaptive_pool((1, 16, 64, 32, 32), (8, 16, 16), "avg", layout="NCDHW")
verify_adaptive_pool((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NDHWC")
verify_adaptive_pool((1, 16, 32, 32, 32), (2, 2, 2), "max", layout="NDHWC")
verify_adaptive_pool((1, 16, 32, 32, 32), (2, 4, 4), "max", layout="NDHWC")
def verify_poolnd(
n,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
layout,
count_include_pad=True,
):
"""verify function of pool1d"""
A = te.placeholder(input_shape, name="A")
if n == 1:
B = topi.nn.pool1d(
A,
kernel=kernel,
stride=stride,
dilation=dilation,
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
elif n == 2:
B = topi.nn.pool2d(
A,
kernel=kernel,
stride=stride,
dilation=dilation,
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
elif n == 3:
B = topi.nn.pool3d(
A,
kernel=kernel,
stride=stride,
dilation=dilation,
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
else:
raise ValueError(f"PoolND only supports n=1, 2, 3 got n={n}")
B = topi.nn.relu(B)
dtype = A.dtype
output_shape = [int(i) for i in B.shape]
input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)
padding_before = padding[:n]
padding_after = padding[n:]
ref_np = tvm.topi.testing.poolnd_python(
input_np,
kernel,
stride,
dilation,
padding_before,
padding_after,
pool_type,
count_include_pad,
ceil_mode,
layout=layout,
)
np.testing.assert_equal(tuple(output_shape), tuple(ref_np.shape))
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _pool_schedule)
s = s_func(B, layout)
a = tvm.nd.array(input_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), ref_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def verify_pool3d(
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad=True,
layout="NCDHW",
):
verify_poolnd(
3,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
@tvm.testing.uses_gpu
def test_pool3d():
"""test cases of pool3d"""
verify_pool3d(
[1, 16, 32, 32, 32], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0, 0, 0, 0], "avg", False, True
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [1, 1, 2, 2, 2, 1], "avg", False, True
)
verify_pool3d(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[3, 3, 3, 3, 3, 3],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 32, 32, 32], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0, 0, 0, 0], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 2, 1, 1, 1, 2], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 2, 1, 1, 1, 2], "max", True
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 1, 0, 5, 4, 3], "avg", False, True
)
verify_pool3d(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [1, 0, 5, 4, 3, 2], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [3, 2, 1, 0, 5, 4], "max", True
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [3, 3, 3], [2, 1, 0, 5, 4, 3], "avg", False, True
)
verify_pool3d(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 1, 3], [1, 0, 5, 4, 3, 2], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 2, 3], [3, 2, 1, 0, 5, 4], "max", True
)
verify_pool3d(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
True,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
True,
layout="NDHWC",
)
verify_pool3d(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[3, 3, 3, 3, 3, 3],
"avg",
False,
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
False,
layout="NDHWC",
)
verify_pool3d(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"max",
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 2, 1, 1, 1, 2],
"max",
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 2, 1, 1, 1, 2],
"max",
True,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 1, 0, 5, 4, 3],
"avg",
False,
True,
layout="NDHWC",
)
verify_pool3d(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[1, 0, 5, 4, 3, 2],
"max",
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[3, 2, 1, 0, 5, 4],
"max",
True,
layout="NDHWC",
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [3, 3, 3], [2, 1, 0, 5, 4, 3], "avg", False, True
)
verify_pool3d(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 1, 3], [1, 0, 5, 4, 3, 2], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 2, 3], [3, 2, 1, 0, 5, 4], "max", True
)
def verify_pool2d(
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad=True,
layout="NCHW",
):
verify_poolnd(
2,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
@tvm.testing.uses_gpu
def test_pool2d():
"""test cases of pool"""
verify_pool2d([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], "avg", False, True)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], "avg", False, True)
verify_pool2d([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], "avg", False, False)
verify_pool2d([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], "avg", False, False)
verify_pool2d([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], "avg", False, False)
verify_pool2d([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], "max", False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", True)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], "avg", False, True)
verify_pool2d([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], "avg", False, False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], "max", False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], "max", True)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], "avg", False, True)
verify_pool2d([1, 16, 32, 32], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], "avg", False, False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [3, 3], [1, 0, 3, 2], "max", False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], "max", True)
verify_pool2d(
[1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], "avg", False, True, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], "avg", False, True, layout="NHWC"
)
verify_pool2d(
[1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], "max", False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", False, layout="NHWC"
)
verify_pool2d([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", True, layout="NHWC")
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], "avg", False, True, layout="NHWC"
)
verify_pool2d(
[1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], "max", False, layout="NHWC"
)
verify_pool2d([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], "max", True, layout="NHWC")
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], "avg", False, True, layout="NHWC"
)
verify_pool2d(
[1, 32, 32, 16], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [3, 3], [1, 0, 3, 2], "max", False, layout="NHWC"
)
verify_pool2d([1, 31, 31, 16], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], "max", True, layout="NHWC")
def verify_pool1d(
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad=True,
layout="NCW",
):
verify_poolnd(
1,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
@tvm.testing.uses_gpu
def test_pool1d():
"""test cases of pool1d"""
verify_pool1d([1, 16, 32], [2], [2], [1], [0, 0], "avg", False, True)
verify_pool1d([1, 16, 31], [3], [3], [1], [1, 2], "avg", False, True)
verify_pool1d([1, 16, 32], [2], [2], [1], [1, 2], "avg", False, False)
verify_pool1d([1, 16, 31], [4], [4], [1], [3, 3], "avg", False, False)
verify_pool1d([1, 16, 31], [4], [4], [1], [0, 0], "avg", False, False)
verify_pool1d([1, 16, 32], [2], [2], [1], [0, 0], "max", False)
verify_pool1d([1, 16, 31], [3], [3], [1], [2, 1], "max", False)
verify_pool1d([1, 16, 31], [3], [3], [1], [2, 1], "max", True)
verify_pool1d([1, 16, 31], [3], [3], [1], [2, 5], "avg", False, True)
verify_pool1d([1, 16, 32], [2], [2], [1], [0, 3], "avg", False, False)
verify_pool1d([1, 16, 31], [3], [3], [1], [1, 4], "max", False)
verify_pool1d([1, 16, 31], [3], [3], [1], [3, 0], "max", True)
verify_pool1d([1, 16, 31], [3], [3], [2], [2, 5], "avg", False, True)
verify_pool1d([1, 16, 32], [2], [2], [3], [0, 3], "avg", False, False)
verify_pool1d([1, 16, 31], [3], [3], [2], [1, 4], "max", False)
verify_pool1d([1, 16, 31], [3], [3], [3], [3, 0], "max", True)
verify_pool1d([1, 32, 16], [2], [2], [1], [0, 0], "avg", False, True, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [1, 2], "avg", False, True, layout="NWC")
verify_pool1d([1, 32, 16], [2], [2], [1], [1, 2], "avg", False, False, layout="NWC")
verify_pool1d([1, 31, 16], [4], [4], [1], [3, 3], "avg", False, False, layout="NWC")
verify_pool1d([1, 31, 16], [4], [4], [1], [0, 0], "avg", False, False, layout="NWC")
verify_pool1d([1, 32, 16], [2], [2], [1], [0, 0], "max", False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [2, 1], "max", False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [2, 1], "max", True, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [2, 5], "avg", False, True, layout="NWC")
verify_pool1d([1, 31, 16], [2], [2], [1], [0, 3], "avg", False, False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [1, 4], "max", False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [3, 0], "max", True, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [2], [2, 5], "avg", False, True, layout="NWC")
verify_pool1d([1, 32, 16], [2], [2], [3], [0, 3], "avg", False, False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [2], [1, 4], "max", False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [3], [3, 0], "max", True, layout="NWC")
if __name__ == "__main__":
test_pool1d()
test_pool2d()
test_pool3d()
test_pool_grad()
test_global_pool()
test_adaptive_pool()
import tvm
import tvm.relay
import tvm.testing
import tvm.topi