@tvm.testing.uses_gpu
def test_adaptive_pool():
verify_adaptive_pool1d((1, 9, 224), (1), "max")
verify_adaptive_pool1d((1, 3, 224), (3), "avg")
verify_adaptive_pool1d((1, 3, 224), (3), "avg", dtype="int32")
verify_adaptive_pool1d((1, 14, 78), (13), "max")
verify_adaptive_pool1d((1, 5, 97), (96), "avg")
verify_adaptive_pool1d((1, 224, 3), (1), "max", layout="NWC")
verify_adaptive_pool1d((1, 3, 224), (3), "avg", layout="NWC")
verify_adaptive_pool2d((1, 9, 224, 224), (1, 1), "max")
verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg")
verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg", dtype="int32")
verify_adaptive_pool2d((1, 14, 56, 78), (34, 13), "max")
verify_adaptive_pool2d((1, 5, 46, 97), (4, 96), "avg")
verify_adaptive_pool2d((1, 224, 224, 3), (1, 1), "max", layout="NHWC")
verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg", layout="NHWC")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "max", layout="NCDHW")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NCDHW")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NDHWC")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NCDHW", dtype="int32")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NDHWC", dtype="int32")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (2, 4, 4), "max", layout="NDHWC")
@tvm.testing.uses_gpu
def test_sequence_mask(executor_kind):
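# sequence_mask keeps the first valid_length[i] elements of each sequence along
# `axis` and replaces everything past that point with mask_value.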
def _verify(data_shape, mask_value, axis, dtype, itype):
max_length = data_shape[axis]
nbatch = data_shape[1 - axis]
data = relay.var("data", relay.TensorType(data_shape, dtype))
valid_length = relay.var("valid_length", relay.TensorType((nbatch,), itype))
out = relay.sequence_mask(data, valid_length, mask_value, axis)
checked = run_infer_type(out)
assert checked.checked_type == relay.ty.TensorType(data_shape, dtype)
func = relay.Function([data, valid_length], out)
data_np = np.random.uniform(size=data_shape).astype(dtype)
valid_length_np = np.random.randint(0, max_length, size=nbatch).astype(itype)
gt_out_np = tvm.topi.testing.sequence_mask(data_np, valid_length_np, mask_value, axis)
for target, dev in tvm.testing.enabled_targets():
out_relay = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
func
)(data_np, valid_length_np)
tvm.testing.assert_allclose(out_relay.numpy(), gt_out_np)
_verify((5, 10), 0.0, 1, "float32", "int32")
_verify((2, 3, 5, 3), 0.0, 0, "float32", "int64")
_verify((5, 8, 3), 0.1, 1, "float64", "float32")
@tvm.testing.uses_gpu
def test_one_hot(executor_kind):
def _get_oshape(indices_shape, depth, axis):
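# Expected output shape: the input shape with `depth` inserted at the one-hot axis.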
oshape = []
true_axis = len(indices_shape) if axis == -1 else axis
ndim = len(indices_shape) + 1
indices_index = 0
for i in range(0, ndim):
if i == true_axis:
oshape.append(depth)
else:
oshape.append(indices_shape[indices_index])
indices_index += 1
return oshape
def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
on_value_const = relay.const(on_value)
off_value_const = relay.const(off_value)
out = relay.one_hot(indices, on_value_const, off_value_const, depth, axis, dtype)
checked = run_infer_type(out)
assert checked.checked_type == relay.ty.TensorType(
_get_oshape(indices_shape, depth, axis), dtype
)
func = relay.Function([indices], out)
indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
for target, dev in tvm.testing.enabled_targets():
out_relay = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
func
)(indices_np)
tvm.testing.assert_allclose(out_relay.numpy(), out_np)
_verify((3,), 3, 1, 0, -1, "int32")
_verify((3,), 3, 1.0, 0.0, -1, "float32")
_verify((2, 2), 5, 2, -2, 0, "int32")
_verify((2, 2), 5, 0.5, -0.5, 1, "float32")
_verify((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_verify((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
@tvm.testing.uses_gpu
def test_matrix_set_diag(executor_kind):
def _verify(input_shape, diagonal_shape, dtype, k=0, align="RIGHT_LEFT"):
input = relay.var("input", relay.TensorType(input_shape, dtype))
diagonal = relay.var("diagonal", relay.TensorType(diagonal_shape, dtype))
out = relay.matrix_set_diag(input, diagonal, k, align)
in_type = run_infer_type(input)
out_type = run_infer_type(out)
assert in_type.checked_type == out_type.checked_type
func = relay.Function([input, diagonal], out)
input_np = np.random.randint(-100, 100, size=input_shape).astype(dtype)
diagonal_np = np.random.randint(-100, 100, size=diagonal_shape).astype(dtype)
out_np = tvm.topi.testing.matrix_set_diag(input_np, diagonal_np, k, align)
for target, dev in tvm.testing.enabled_targets():
out_relay = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
func
)(input_np, diagonal_np)
tvm.testing.assert_allclose(out_relay.numpy(), out_np)
_verify((2, 2), (2,), "float32")
_verify((4, 3, 3), (4, 3), "int32")
_verify((2, 3, 4), (2, 3), "float32", 1)
_verify((2, 3, 4), (2, 4, 3), "int32", (-1, 2), "LEFT_RIGHT")
_verify((2, 3, 4), (2, 4, 3), "int32", (-1, 2), "LEFT_LEFT")
_verify((2, 3, 4), (2, 4, 3), "int32", (-1, 2), "RIGHT_RIGHT")
@tvm.testing.parametrize_targets
def test_nll_loss(executor_kind, dev, target):
def _get_oshape(target_shape, reduction):
if reduction == "none":
return target_shape
else:
return []
def _verify(prediction_shape, reduction="mean", ignore_index=-100, dtype="float32"):
C = prediction_shape[1]
target_shape = prediction_shape[:1] + prediction_shape[2:]
predictions = relay.var("predictions", relay.TensorType(prediction_shape, dtype))
targets = relay.var("targets", relay.TensorType(target_shape, "int32"))
weights = relay.var("weights", relay.TensorType((C,), dtype))
out = relay.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
checked = run_infer_type(out)
assert checked.checked_type == relay.ty.TensorType(
_get_oshape(target_shape, reduction), dtype
)
func = relay.Function([predictions, targets, weights], out)
predictions_np = np.random.uniform(size=prediction_shape).astype(dtype)
targets_np = np.random.randint(0, C, target_shape).astype("int32")
weights_np = np.random.uniform(size=(C,)).astype(dtype)
out_np = tvm.topi.testing.nll_loss(
predictions_np, targets_np, weights_np, reduction, ignore_index
)
out_relay = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
predictions_np, targets_np, weights_np
)
tvm.testing.assert_allclose(out_relay.numpy(), out_np, rtol=1e-6, atol=1e-6)
_verify((10, 5))
_verify((10, 5, 2, 2))
_verify((10, 5), reduction="sum")
_verify((10, 5), reduction="none")
_verify((10, 5), ignore_index=3)
_verify((10, 5), dtype="float64")
if __name__ == "__main__":
tvm.testing.main()
""" Support level2 operator test cases.
"""
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, relay, te
from tvm.contrib import utils, cudnn
from tvm.ir.module import IRModule
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
from tvm.topi.cuda.conv3d_winograd import _infer_tile_size
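# Tests parameterized by executor_kind run once with the graph executor and once with the VM.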
executor_kind = tvm.testing.parameter("graph", "vm")
@tvm.testing.uses_gpu
def test_conv1d_infer_type():
n, c, w = te.var("n"), 10, 224
x = relay.var("x", relay.ty.TensorType((n, c, w), "float32"))
w = relay.var("w")
y = relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1), channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType((2, 10, 3), "float32")
n, c, w = te.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
y = relay.nn.conv1d(x, w, out_dtype="int32")
assert 'out_dtype="int32"' in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 222), "int32")
n, c, w = te.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
y = relay.nn.conv1d(x, w, out_dtype="int32")
assert 'out_dtype="int32"' in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 222), "int32")
n, c, w = 4, 32, 224
x = relay.var("x", relay.TensorType((n, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv1d(
x, wt, kernel_size=3, padding=(1, 1), channels=16, data_layout="NWC", out_dtype="int32"
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, w, 16), "int32")
@tvm.testing.uses_gpu
def test_conv1d_run():
def run_test_conv1d(
dtype,
out_dtype,
scale,
dshape,
kshape,
padding=(1, 1),
fref=None,
dilation=1,
except_targets=None,
**attrs,
):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv1d(x, w, padding=padding, dilation=dilation, **attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
ref_res = tvm.topi.testing.conv1d_ncw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation
)
for target, dev in tvm.testing.enabled_targets():
if target in except_targets:
continue
dev = tvm.device(target, 0)
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data, kernel
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
dshape = (1, 3, 224)
kshape = (10, 3, 3)
run_test_conv1d(
"float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3
)
run_test_conv1d("int8", "int32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3)
dshape = (1, 3, 18)
kshape = (10, 3, 3)
run_test_conv1d(
"float32",
"float32",
1,
dshape,
kshape,
padding=(1, 1),
channels=10,
kernel_size=3,
dilation=3,
)
@tvm.testing.uses_gpu
def test_conv2d_infer_type():
n, c, h, w = te.size_var("n"), 10, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
w = relay.var("w")
y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 224, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType((2, 10, 3, 3), "float32")
n, c, h, w = te.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
y = relay.nn.conv2d(x, w, out_dtype="int32")
assert 'out_dtype="int32"' in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 222, 222), "int32")
n, c, h, w = te.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
y = relay.nn.conv2d(x, w, out_dtype="int32")
assert 'out_dtype="int32"' in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 222, 222), "int32")
n, c, h, w = 4, 32, 224, 224
x = relay.var("x", relay.TensorType((n
wt = relay.var("w")
y = relay.nn.conv2d(
x,
wt,
kernel_size=(3, 3),
padding=(1, 1),
channels=16,
data_layout="NCHW4n4c",
kernel_layout="OIHW4o4i",
out_dtype="int32",
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 4, 224, 224, 4, 4), "int32")
assert yy.args[1].checked_type == relay.TensorType((4, 8, 3, 3, 4, 4), "int8")
n, c, h, w = 4, 32, 224, 224
x = relay.var("x", relay.TensorType((n, h, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv2d(
x,
wt,
kernel_size=(3, 3),
padding=(1, 1),
channels=16,
data_layout="NHWC",
out_dtype="int32",
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, h, w, 16), "int32")
class TestConv2D:
config = {
"group1": dict(
dtype="float32",
out_dtype="float32",
scale=1,
dshape=(1, 32, 18, 18),
kshape=(32, 4, 3, 3),
padding=(1, 1),
channels=32,
groups=8,
kernel_size=(3, 3),
dilation=(1, 1),
),
"group2": dict(
dtype="float32",
out_dtype="float32",
scale=1,
dshape=(1, 32, 18, 18),
kshape=(64, 1, 3, 3),
padding=(1, 1),
channels=64,
groups=32,
kernel_size=(3, 3),
dilation=(1, 1),
),
"normal": dict(
dtype="float32",
out_dtype="float32",
scale=1,
dshape=(1, 3, 224, 224),
kshape=(10, 3, 3, 3),
padding=(1, 1),
channels=10,
groups=1,
kernel_size=(3, 3),
dilation=(1, 1),
),
"mixed_precision_int8_int32_case1": dict(
dtype="int8",
out_dtype="int32",
scale=1,
dshape=(1, 3, 224, 224),
kshape=(10, 3, 3, 3),
padding=(1, 1),
channels=10,
groups=1,
kernel_size=(3, 3),
dilation=(1, 1),
),
"mixed_precision_int8_int32_case2": dict(
dtype="int8",
out_dtype="int32",
scale=1,
dshape=(1, 3, 224, 224),
kshape=(10, 3, 1, 3),
padding=(0, 1),
channels=10,
groups=1,
kernel_size=(1, 3),
dilation=(1, 1),
),
"dilated": dict(
dtype="float32",
out_dtype="float32",
scale=1,
dshape=(1, 3, 18, 18),
kshape=(10, 3, 3, 3),
padding=(1, 1),
channels=10,
groups=1,
kernel_size=(3, 3),
dilation=(3, 3),
),
}
(
dtype,
out_dtype,
scale,
dshape,
kshape,
padding,
channels,
groups,
kernel_size,
dilation,
) = tvm.testing.parameters(
*[
[
d[p]
for p in [
"dtype",
"out_dtype",
"scale",
"dshape",
"kshape",
"padding",
"channels",
"groups",
"kernel_size",
"dilation",
]
]
for d in config.values()
],
ids=config.keys(),
)
def test_run(
self,
target,
dev,
dtype,
out_dtype,
scale,
dshape,
kshape,
padding,
groups,
dilation,
channels,
kernel_size,
):
target = tvm.target.Target(target)
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", shape=kshape, dtype=dtype)
y = relay.nn.conv2d(
x,
w,
padding=padding,
dilation=dilation,
groups=groups,
channels=channels,
kernel_size=kernel_size,
)
func = relay.Function([x, w], y)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
ref_res = tvm.topi.testing.conv2d_nchw_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups
)
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data, kernel
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-4, atol=1e-4)
def test_compile_depthwise_conv2d_arm_cpu():
dtype = "float32"
out_dtype = "float32"
scale = 1
dshape = (1, 512, 32, 32)
kshape = (512, 1, 3, 3)
padding = (1, 1)
channels = 512
groups = 512
kernel_size = (3, 3)
dilation = (1, 1)
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", shape=kshape, dtype=dtype)
y = relay.nn.conv2d(
x,
w,
padding=padding,
dilation=dilation,
groups=groups,
channels=channels,
kernel_size=kernel_size,
)
func = relay.Function([x, w], y)
mod = tvm.IRModule()
mod["main"] = func
test_schedule = '{"i": ["llvm -device=arm_cpu", "depthwise_conv2d_nchw_spatial_pack.arm_cpu", \
[["T |
ENSOR", [1, 512, 32, 32], "float32"], \
["TENSOR", [512, 1, 3, 3], "float32"], \
[1, 1], [1, 1], [1, 1], "float32"], {}, \
["depthwise_conv2d_nchw_spatial_pack.arm_cpu", [1, 512, 32, 32, "float32"], \
[512, 1, 3, 3, "float32"], [1, 1], [1, 1], [1, 1], "float32"], \
{"i": 743640, "t": "", "c": null, \
"e": [["tile_co", "sp", [32, 16]], ["tile_oh", "sp", [8, 1]], \
["tile_ow", "sp", [1, 8]], \
["reorder_0", "re", [0, 1, 2, 3, 4, 5, 8, 6, 7]], \
["reorder_1", "re", [0, 1, 2, 3, 6, 4, 5]], \
["ann_reduce", "an", ["unroll", "none"]], \
["ann_spatial", "an", ["unroll", "unroll", "vec"]], \
["data_pad_inline", "ot", 4], ["data_vec_inline", "ot", 1], \
["conv_inline", "ot", 0]]}], "r": [[0.0002933163], \
0, 3.1976189613342285, 1570811630.6058347], "v": 0.1}'
temp = utils.tempdir()
with open(temp.relpath("temp.log"), "w") as log_file:
log_file.write(test_schedule)
with autotvm.apply_history_best(temp.relpath("temp.log")):
with tvm.transform.PassContext(opt_level=3):
print("Compiling...")
graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu")
@tvm.testing.uses_gpu
def test_conv2d_winograd():
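# Minimal AutoTVM fallback context: it answers schedule queries without a tuning
# log, and its cost model (0.1 vs. 1) steers selection toward winograd kernels.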
class WinogradFallback(autotvm.FallbackContext):
def _query_inside(self, target, workload):
key = (target, workload)
if key in self.memory:
return self.memory[key]
cfg = autotvm.task.space.FallbackConfigEntity()
cfg.is_fallback = False
cfg.cost = 0.1 if "winograd" in workload[0] else 1
cfg["tile_b"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg["tile_y"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg["tile_x"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg["tile_rc"] = autotvm.task.space.SplitEntity([-1, 1])
cfg["auto_unroll_max_step"] = autotvm.task.space.OtherOptionEntity(1500)
cfg["unroll_explicit"] = autotvm.task.space.OtherOptionEntity(1)
self.memory[key] = cfg
return cfg
def run_test_conv2d_cuda(
dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs
):
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", shape=kshape, dtype=dtype)
y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
func = relay.Function([x, w], y)
mod = tvm.IRModule()
mod["main"] = func
mod = relay.transform.InferType()(mod)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
ref_res = tvm.topi.testing.conv2d_nchw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups
)
with WinogradFallback(), tvm.transform.PassContext(opt_level=3):
for target, dev in tvm.testing.enabled_targets():
if target != "cuda":
continue
dev = tvm.device(target, 0)
params = {"w": tvm.nd.array(kernel)}
graph, lib, params = relay.build_module.build(mod, target=target, params=params)
module = tvm.contrib.graph_executor.create(graph, lib, dev)
module.set_input("x", tvm.nd.array(data))
module.set_input(**params)
module.run()
op_res1 = module.get_output(0)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-3, atol=1e-3)
dshape = (1, 80, 73, 73)
kshape = (192, 80, 3, 3)
run_test_conv2d_cuda(
"float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=192, kernel_size=(3, 3)
)
run_test_conv2d_cuda(
"float32", "float32", 1, dshape, kshape, padding=(0, 0), channels=192, kernel_size=(3, 3)
)
run_test_conv2d_cuda(
"float32", "float32", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(3, 3)
)
kshape = (192, 80, 7, 7)
run_test_conv2d_cuda(
"float32", "float32", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(7, 7)
)
@tvm.testing.uses_gpu
def test_conv3d_infer_type():
n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32"))
w = relay.var("w")
y = relay.nn.conv3d(x, w, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 224, 224, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType((2, 10, 3, 3, 3), "float32")
n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8"))
y = relay.nn.conv3d(x, w, out_dtype="int32")
assert 'out_dtype="int32"' in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 222, 222, 222), "int32")
n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3 |
, 3, 3), "int8"))
y = relay.nn.conv3d(x, w, out_dtype="int32")
assert 'out_dtype="int32"' in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 222, 222, 222), "int32")
n, c, d, h, w = 4, 32, 224, 224, 224
x = relay.var("x", relay.TensorType((n, d, h, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv3d(
x,
wt,
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
channels=16,
data_layout="NDHWC",
out_dtype="int32",
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, d, h, w, 16), "int32")
x = relay.var("x", relay.TensorType((1, 16, 224, 224, 224), "float32"))
w = relay.var("w", relay.TensorType((4, 4, 1, 1, 1), "float32"))
y = relay.nn.conv3d(x, w, groups=4, kernel_size=(1, 1, 1), channels=4)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 4, 224, 224, 224), "float32")
@tvm.testing.uses_gpu
def test_conv3d_run():
def run_test_conv3d(
dtype,
out_dtype,
scale,
dshape,
kshape,
padding=(1, 1, 1),
fref=None,
groups=1,
dilation=(1, 1, 1),
except_targets=None,
**attrs,
):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
ref_res = tvm.topi.testing.conv3d_ncdhw_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups
)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
for target, dev in tvm.testing.enabled_targets():
if target in except_targets:
continue
dev = tvm.device(target, 0)
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data, kernel
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
dshape = (1, 3, 5, 224, 224)
kshape = (10, 3, 3, 3, 3)
run_test_conv3d(
"float32",
"float32",
1,
dshape,
kshape,
padding=(1, 1, 1),
channels=10,
kernel_size=(3, 3, 3),
)
@tvm.testing.uses_gpu
def test_conv3d_ndhwc_run():
def run_test_conv3d(
dtype,
out_dtype,
scale,
dshape,
kshape,
padding=(1, 1, 1),
fref=None,
groups=1,
dilation=(1, 1, 1),
except_targets=None,
**attrs,
):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv3d(
x,
w,
padding=padding,
dilation=dilation,
groups=groups,
data_layout="NDHWC",
kernel_layout="DHWIO",
**attrs,
)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
ref_res = tvm.topi.testing.conv3d_ndhwc_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding
)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
for target, dev in tvm.testing.enabled_targets():
if target in except_targets:
continue
dev = tvm.device(target, 0)
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data, kernel
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
dshape = (1, 5, 224, 224, 6)
kshape = (3, 3, 3, 6, 10)
run_test_conv3d(
"float32",
"float32",
1,
dshape,
kshape,
padding=(1, 1, 1),
channels=10,
kernel_size=(3, 3, 3),
except_targets=["cuda"],
)
@tvm.testing.uses_gpu
def test_conv3d_winograd():
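# Same fallback trick as in test_conv2d_winograd, here steering conv3d on CUDA
# toward the winograd schedule without a pre-tuned log.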
class WinogradFallback(autotvm.FallbackContext):
def _query_inside(self, target, workload):
key = (target, workload)
if key in self.memory:
return self.memory[key]
cfg = autotvm.task.space.FallbackConfigEntity()
cfg.is_fallback = False
cfg.cost = 0.1 if "winograd" in workload[0] else 1
cfg["tile_b"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg["tile_y"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg["tile_x"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg["tile_rc"] = autotvm.task.space.SplitEntity([-1, 1])
cfg["auto_unroll_max_step"] = autotvm.task.space.OtherOptionEntity(0)
cfg["unroll_explicit"] = autotvm.task.space.OtherOptionEntity(1)
self.memory[key] = cfg
return cfg
def run_test_conv3d_cuda(
dtype,
out_dtype,
scale,
dshape,
kshape,
padding=(1, 1, 1),
groups=1,
dilation=(1, 1, 1),
prepack=False,
**attrs,
):
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", shape=kshape, dtype=dtype)
if prepack:
tile_size = _infer_tile_size(np.zeros(shape=dshape), np.zeros(shape=kshape))
w_packed = relay.nn.contrib_conv3d_winograd_weight_transform(w, tile_size)
y = relay.nn.contrib_conv3d_winograd_without_weight_transform(
x,
w_packed,
tile_size,
padding=padding,
dilation=dilation,
groups=groups,
channels=kshape[0],
**attrs,
)
else:
y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
func = relay.Function([x, w], y)
mod = tvm.IRModule()
mod["main"] = func
mod = relay.transform.InferType()(mod)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
ref_res = tvm.topi.testing.conv3d_ncdhw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups
)
with WinogradFallback(), tvm.transform.PassContext(opt_level=3):
for target, dev in tvm.testing.enabled_targets():
if target != "cuda":
continue
dev = tvm.device(target, 0)
params = {"w": tvm.nd.array(kernel)}
graph, lib, params = relay.build_module.build(mod, target=target, params=params)
module = tvm.contrib.graph_executor.create(graph, lib, dev)
module.set_input("x", tvm.nd.array(data))
module.set_input(**params)
module.run()
op_res1 = module.get_output(0)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-3, atol=1e-3)
dshape = (1, 32, 16, 16, 16)
kshape = (64, 32, 3, 3, 3)
run_test_conv3d_cuda(
"float32", "float32", 1, dshape, kshape, padding=(1, 1, 1), kernel_size=(3, 3, 3)
)
kshape = (64, 32, 1, 3, 3)
run_test_conv3d_cuda(
"float32", "float32", 1, dshape, kshape, padding=(0, 1, 1), kernel_size=(1, 3, 3)
)
dshape = (1, 61, 20, 20, 20)
kshape = (120, 61, 5, 5, 5)
run_test_conv3d_cuda(
"float32",
"float32",
1,
dshape,
kshape,
padding=(2, 2, 2),
channels=120,
kernel_size=(5, 5, 5),
)
kshape = (120, 61, 1, 5, 5)
run_test_conv3d_cuda(
"float32",
"float32",
1,
dshape,
kshape,
padding=(0, 2, 2),
channels=120,
kernel_size=(1, 5, 5),
)
@tvm.testing.uses_gpu
def test_conv3d_transpose_infer_type():
n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32"))
w = relay.var("w")
y = relay.nn.conv3d_transpose(x, w, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 2, 224, 224, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType((10, 2, 3, 3, 3), "float32")
n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
w = relay.var("w", relay.TensorType((10, 12, 3, 3, 3), "int8"))
y = relay.nn.conv3d_transpose(x, w, out_dtype="int32")
assert 'out_dtype="int32"' in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 12, 226, 226, 226), "int32")
n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8"))
w = relay.var("w", relay.TensorType((10, 12, 3, 3, 3), "int8"))
y = relay.nn.conv3d_transpose(x, w, out_dtype="int32")
assert 'out_dtype="int32"' in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 12, 226, 226, 226), "int32")
@tvm.testing.uses_gpu
def test_conv3d_transpose_ncdhw_run():
dshape = (1, 3, 24, 24, 24)
kshape = (3, 4, 2, 2, 2)
x = relay.var("x", shape=dshape)
w = relay.var("w")
y = relay.nn.conv3d_transpose(
x, w, channels=4, kernel_size=(2, 2, 2), strides=(1, 1, 1), padding=(1, 1, 1)
)
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
ref_res = tvm.topi.testing.conv3d_transpose_ncdhw_python(data, kernel, 1, 1, 0)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data, kernel
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_conv2d_transpose_infer_type():
n, c, h, w = te.size_var("n |
"), 10, 10, 12
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.conv2d_transpose(x, w, kernel_size=(3, 3), padding=(1, 1), channels=15)
assert "channels=15" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 15, 10, 12), "float32")
assert yy.args[1].checked_type == relay.TensorType((10, 15, 3, 3), "float32")
n, h, w, c = te.size_var("n"), 10, 10, 12
x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32"))
y = relay.nn.conv2d_transpose(x, w, output_padding=(1, 1), channels=11, data_layout="NHWC")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 15, 15, 11), "float32")
@tvm.testing.uses_gpu
def test_conv2d_transpose_nchw_run():
k_layouts = {"OIHW": (10, 3, 3, 3), "IOHW": (3, 10, 3, 3)}
output_padding = (1, 1)
for k_layout, kshape in k_layouts.items():
dshape = (1, 3, 18, 18)
x = relay.var("x", shape=dshape)
w = relay.var("w")
y = relay.nn.conv2d_transpose(
x,
w,
channels=10,
kernel_size=(3, 3),
strides=(2, 2),
padding=(1, 1),
output_padding=output_padding,
kernel_layout=k_layout,
data_layout="NCHW",
)
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
if k_layout != "IOHW":
kernel_iohw = np.transpose(kernel, [1, 0, 2, 3])
else:
kernel_iohw = kernel
ref_res = tvm.topi.testing.conv2d_transpose_nchw_python(
data, kernel_iohw, 2, 1, output_padding
)
enabled_targets = tvm.testing.enabled_targets()
if cudnn.exists() and k_layout == "IOHW":
enabled_targets.append(("cuda |
-libs=cudnn", tvm.cuda(0)))
for target, dev in enabled_targets:
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data, kernel
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_conv2d_transpose_nhwc_run():
dshape_nhwc = (1, 18, 18, 3)
kshape_hwoi = (3, 3, 10, 3)
x = relay.var("x", shape=dshape_nhwc)
w = relay.var("w")
y = relay.nn.conv2d_transpose(
x,
w,
channels=10,
kernel_size=(3, 3),
strides=(2, 2),
padding=(1, 1),
output_padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWOI",
)
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape_nhwc).astype(dtype)
kernel = np.random.uniform(size=kshape_hwoi).astype(dtype)
ref_res = tvm.topi.testing.conv2d_transpose_nhwc_python(
data, kernel, "HWOI", 2, 1, output_padding=(1, 1)
)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data, kernel
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_conv2d_transpose_nhwc_cudnn():
if not cudnn.exists():
return
dshape_nhwc = (1, 18, 18, 3)
kshape_ihwo = (3, 3, 3, 10)
x = relay.var("x", shape=dshape_nhwc)
w = relay.var("w", shape=kshape_ihwo)
y = relay.nn.conv2d_transpose(
x,
w,
channels=10,
kernel_size=(3, 3),
strides=(2, 2),
padding=(1, 1),
output_padding=(1, 1),
data_layout="NHWC",
kernel_layout="IHWO",
)
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape_nhwc).astype(dtype)
kernel = np.random.uniform(size=kshape_ihwo).astype(dtype)
ref_res = tvm.topi.testing.conv2d_transpose_nhwc_python(
data, np.transpose(kernel, [1, 2, 3, 0]), "HWOI", 2, 1, output_padding=(1, 1)
)
target = "cuda -libs=cudnn"
dev = tvm.cuda(0)
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_conv1d_transpose_ncw_run():
dshape = (1, 3, 18)
kshape = (3, 10, 3)
oshape = (1, 10, 36)
x = relay.var("x", shape=dshape)
w = relay.var("w")
y = relay.nn.conv1d_transpose(
x, w, channels=10, kernel_size=(3,), strides=(2,), padding=(1,), output_padding=(1,)
)
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
ref_res = tvm.topi.testing.conv1d_transpose_ncw_python(data, kernel, 2, 1, output_padding=(1,))
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data, kernel
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_upsampling_infer_type():
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
scale = tvm.tir.const(2.0, "float64")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear")
'method="BINLINEAR"' in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(
n,
c,
tvm.tir.Cast("int32", te.round(h * scale)),
tvm.tir.Cast("int32", te.round(w * scale)),
),
"float32",
)
n, c = te.size_var("n"), te.size_var("c")
x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32"))
y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", metho |
d="bilinear")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32")
@tvm.testing.uses_gpu
def test_upsampling3d_infer_type():
n, c, d, h, w = (
te.size_var("n"),
te.size_var("c"),
te.size_var("d"),
te.size_var("h"),
te.size_var("w"),
)
scale = tvm.tir.const(2.0, "float64")
x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
y = relay.nn.upsampling3d(
x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear"
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(
n,
c,
tvm.tir.Cast("int32", te.round(d * scale)),
tvm.tir.Cast("int32", te.round(h * scale)),
tvm.tir.Cast("int32", te.round(w * scale)),
),
"float32",
)
n, c = te.size_var("n"), te.size_var("c")
x = relay.var("x", relay.TensorType((n, c, 100, 100, 200), "float32"))
y = relay.nn.upsampling3d(
x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear"
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), "float32")
def _test_global_pool2d(opfunc, reffunc):
n, c, h, w = te.size_var("n"), te.size_var("c"), 224, 224
x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
y = opfunc(x, layout="NHWC")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32")
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 1, 1), "float32")
dtype = "float32"
dshape = (1, 1024, 7, 7)
x = relay.var("x", shape=dshape)
y = opfunc(x)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = reffunc(data, axis=(2, 3), keepdims=True)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_pool2d():
def _test_pool2d(opfunc, pool_type, pool_size=2, strides=2, dilation=1, padding=0):
n, c, h, w = te.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32")
dtype = "float32"
dshape = (1, 3, 28, 28)
x = relay.var("x", shape=dshape)
y = opfunc(x, pool_size=pool_size, strides=strides, dilation=dilation, padding=padding)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = tvm.topi.testing.poolnd_python(
data,
[pool_size, pool_size],
[strides, strides],
[dilation, dilation],
[padding, padding],
[padding, padding],
pool_type,
count_include_pad=False,
ceil_mode=False,
)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
def _test_pool2d_int(opfunc, reffunc, dtype):
n, c, h, w = te.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
y = opfunc(x, pool_size=(1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype)
dtype = "int32"
dshape = (1, 3, 28, 28)
for shape_dtype in ["int32", "int64"]: |
x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in dshape], dtype=dtype)
y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
func = relay.Function([x], y)
data = np.random.randint(low=-128, high=128, size=dshape)
ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5)).astype(dtype)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_pool2d(relay.nn.max_pool2d, "max")
_test_pool2d(relay.nn.max_pool2d, "max", pool_size=2, strides=2, padding=0)
_test_pool2d(relay.nn.max_pool2d, "max", pool_size=2, strides=2, padding=0, dilation=2)
_test_pool2d(relay.nn.avg_pool2d, "avg")
_test_pool2d(relay.nn.avg_pool2d, "avg", pool_size=2, strides=2, padding=0)
_test_pool2d(relay.nn.avg_pool2d, "avg", pool_size=2, strides=2, padding=0, dilation=2)
_test_pool2d_int(relay.nn.avg_pool2d, np.mean, "int32")
_test_pool2d_int(relay.nn.avg_pool2d, np.mean, "uint16")
_test_global_pool2d(relay.nn.global_max_pool2d, np.max)
_test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)
def _test_global_pool1d(opfunc, reffunc):
n, c, w = te.size_var("n"), te.size_var("c"), 224
x = relay.var("x", relay.TensorType((n, w, c), "float32"))
y = opfunc(x, layout="NWC")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 1, c), "float32")
n, c, w = te.size_var("n"), te.size_var("c"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, w), "float32"))
y = opfunc(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 1), "float32")
dtype = "float32"
dshape = (1, 1024, 7)
x = relay.var("x", shape=dshape)
y = opfunc(x)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = reffunc(data, axis=(2,), keepdims=True)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_pool1d():
def _test_pool1d(
opfunc, pool_type, pool_size=2, strides=2, dilation=1, padding=0, dtype="float32"
):
n, c, w = te.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "float32"))
y = opfunc(x, pool_size=(1,))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224), "float32")
dshape = (1, 3, 32)
for shape_dtype in ["int32", "int64"]:
x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in dshape], dtype=dtype)
pool_type = "max" if "max" in str(opfunc) else "avg"
y = opfunc(x, pool_size=pool_size, strides=strides, dilation=dilation, padding=padding)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = tvm.topi.testing.poolnd_python(
data,
[pool_size],
[strides],
[dilation],
[padding],
[padding],
pool_type,
count_include_pad=False,
ceil_mode=False,
)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_pool1d(relay.nn.max_pool1d, "max")
_test_pool1d(relay.nn.max_pool1d, "max", dtype="int32")
_test_pool1d(relay.nn.max_pool1d, "max", pool_size=2, strides=2, padding=0) |
_test_pool1d(relay.nn.max_pool1d, "max", pool_size=2, strides=2, padding=0, dilation=2)
_test_pool1d(relay.nn.avg_pool1d, "avg")
_test_pool1d(relay.nn.avg_pool1d, "avg", dtype="int32")
_test_pool1d(relay.nn.avg_pool1d, "avg", pool_size=2, strides=2, padding=0)
_test_pool1d(relay.nn.avg_pool1d, "avg", pool_size=2, strides=2, padding=0, dilation=2)
_test_global_pool1d(relay.nn.global_max_pool1d, np.max)
_test_global_pool1d(relay.nn.global_avg_pool1d, np.mean)
@tvm.testing.uses_gpu
def test_pool3d():
def _test_pool3d(
opfunc,
pool_type,
pool_size=2,
strides=2,
dilation=1,
padding=[0, 0, 0, 0, 0, 0],
dtype="float32",
):
n, c, d, h, w = te.size_var("n"), 10, 5, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 5, 224, 224), "float32")
dtype = "float32"
dshape = (1, 3, 32, 32, 32)
for shape_dtype in ["int32", "int64"]:
x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in dshape], dtype=dtype)
pool_type = "max" if "max" in str(opfunc) else "avg"
y = opfunc(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
dilation=dilation,
)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = tvm.topi.testing.poolnd_python(
data,
[pool_size, pool_size, pool_size],
[strides, strides, strides],
[dilation, dilation, dilation],
padding[:3],
padding[3:],
pool_type,
count_include_pad=False,
ceil_mode=False,
)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_pool3d(relay.nn.max_pool3d, "max")
_test_pool3d(relay.nn.max_pool3d, "max", dtype="int32")
_test_pool3d(relay.nn.max_pool3d, "max", padding=(2, 0, 0, 2, 0, 0))
_test_pool3d(relay.nn.max_pool3d, "max", padding=(0, 3, 0, 0, 3, 0))
_test_pool3d(relay.nn.max_pool3d, "max", padding=(0, 0, 4, 0, 0, 4))
_test_pool3d(relay.nn.max_pool3d, "max", pool_size=2, strides=2)
_test_pool3d(relay.nn.max_pool3d, "max", pool_size=2, strides=2, dilation=2)
_test_pool3d(relay.nn.avg_pool3d, "avg")
_test_pool3d(relay.nn.avg_pool3d, "avg", dtype="int32")
_test_pool3d(relay.nn.avg_pool3d, "avg", padding=(2, 0, 0, 2, 0, 0))
_test_pool3d(relay.nn.avg_pool3d, "avg", padding=(0, 3, 0, 0, 3, 0))
_test_pool3d(relay.nn.avg_pool3d, "avg", padding=(0, 0, 4, 0, 0, 4))
_test_pool3d(relay.nn.avg_pool3d, "avg", pool_size=2, strides=2)
_test_pool3d(relay.nn.avg_pool3d, "avg", pool_size=2, strides=2, dilation=2)
@tvm.testing.uses_gpu
def test_avg_pool2d_no_count_pad():
kh, kw = (4, 4)
sh, sw = (2, 2)
ph, pw = (2, 2)
n = 1
(ic, ih, iw) = (3, 28, 28)
(oc, oh, ow) = (3, 15, 15)
dshape = (n, ic, ih, iw)
x = relay.var("x", shape=dshape)
y = relay.nn.avg_pool2d(
x, pool_size=(kh, kw), strides=(sw, sw), padding=(ph, pw), count_include_pad=False
)
func = relay.Function([x], y)
dtype = "float32"
a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
pad_np = np.zeros(shape=(n, ic, ih + 2 * ph, iw + 2 * pw)).astype(dtype)
no_zero = (range(n), range(ic), (range(ph, ih + ph)), (range(pw, iw + pw)))
pad_np[np.ix_(*no_zero)] = a_np
b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
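# Reference for count_include_pad=False: each window sum is divided by the
# number of genuine (non-padding) elements it covers rather than by kh * kw.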
for i in range(oh):
for j in range(ow):
pad_count = np.sum(
pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw] > 0, axis=(2, 3)
)
b_np[:, :, i, j] = np.sum(
pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw], axis=(2, 3)
) / np.maximum(pad_count, 1)
ref_res = np.maximum(b_np, 0.0)
data = a_np
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_flatten_infer_type(executor_kind):
d1, d2, d3, d4 = te.size_var("d1"), te.size_var("d2"), te.size_var("d3"), te.size_var("d4")
x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((d1, ((d2 * d3) * d4)), "float32")
x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((3, 24), "float32")
x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((d1, ((2 * d3) * 3)), "float32")
shape = (1, 5, 10, 10)
o_shape = (1, 500)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.batch_flatten(x)
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(o_shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = x_data.flatten().reshape(o_shape)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_pad_infer_type():
n, c, h, w = 1, 2, 3, 4
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32")
n, c, h, w = 4, 6, 3, 5
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((-1, -1), (2, -2), (0, -3), (4, 4)), pad_mode="reflect")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((2, 6, 0, 13), "float32")
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((-1, -1), (-2, -2), (1, -3), (4, 4)))
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n + (-2), c + (-4), h + (-2), w + 8), "float32")
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(
t, ((1, 1), (2, 2), (3, 3), (4, 4)), pad_value=relay.var("pad_value", "float32")
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
def _get_numpy_pad(dshape, data, pad, pad_value=0):
mod_pad = []
for axis, (pad_x, pad_y) in enumerate(pad):
indices = range(dshape[axis])
if pad_x < 0:
indices = indices[abs(pad_x) :]
pad_x = 0
if pad_y < 0:
indices = indices[:pad_y]
pad_y = 0
data = np.take(data, indices, axis)
mod_pad.append((pad_x, pad_y))
return np.pad(data, tuple(mod_pad), "constant", constant_values=pad_value)
@tvm.testing.uses_gpu
def test_pad_run():
def _test_run(dtype):
dshape_list = [(4, 10, 7, 7), (4, 6, 3, 5)]
pad_list = [((1, 1), (2, 2), (3, 3), (4, 4)), ((-1, -1), (2, -2), (0, -2), (4, 4))]
for dshape, pad in zip(dshape_list, pad_list):
x = relay.var("x", shape=dshape)
y = relay.nn.pad(x, pad)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = _get_numpy_pad(dshape, data, pad)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_run("float32")
_test_run("int32")
@tvm.testing.uses_gpu
def test_pad_run_dynamic_pad_value():
def _test_run(dtype):
dshape = (4, 6, 3, 5)
pad = ((-1, -1), (2, -2), (0, -2), (4, 4))
data = relay.var("data", shape=dshape, dtype=dtype)
pad_value = relay.var("pad_value", dtype)
pad_data = relay.nn.pad(data, pad, pad_value=pad_value)
f = relay.Function([data, pad_value], pad_data)
data_arr = np.random.uniform(-10, 10, size=dshape).astype(dtype)
pad_value_arr = 2.0
ref_res = _get_numpy_pad(dshape, data_arr, pad, pad_value=pad_value_arr)
for target, dev in tvm.testing.enabled_targets():
result = relay.create_executor(kind="graph", device=dev, target=target).evaluate(f)(
data_arr, pad_value_arr
)
tvm.testing.assert_allclose(result.numpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_run("float32")
_test_run("int32")
@tvm.testing.uses_gpu
@pytest.mark.parametrize("dtype", ["float32", "float16"])
def test_lrn(executor_kind, dtype):
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", shape=(n, c, h, w), dtype=dtype) |
y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=0.00001, beta=0.75)
"alpha=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), dtype)
shape = (1, 5, 10, 10)
x = relay.var("x", relay.TensorType(shape, dtype))
size = 5
axis = 1
bias = 0.5
alpha = 0.00001
beta = 0.75
z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = tvm.topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_l2_normalize(executor_kind):
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", shape=(n, c, h, w))
y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])
"axis=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w))
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
eps = 0.001
axis = 1
z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis])
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = tvm.topi.testing.l2_normalize_python(x_data, eps, axis)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
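# NumPy reference for nn.batch_flatten: keep the batch axis, collapse the rest.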
def batch_flatten(data):
shape = data.shape
target_dim = 1
for i in range(len(shape) - 1):
target_dim = target_dim * shape[i + 1]
return np.reshape(data, (shape[0], target_dim))
@tvm.testing.uses_gpu
def test_batch_flatten():
t1 = relay.TensorType((5, 10, 5))
x = relay.Var("x", t1)
func = relay.Function([x], relay.nn.batch_flatten(x))
data = np.random.rand(5, 10, 5).astype(t1.dtype)
ref_res = batch_flatten(data)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def _test_upsampling(layout, method, align_corners=False):
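# Verifies type inference with a symbolic batch dim, then checks numerics
# against the topi resize2d reference ("bilinear" is passed as "linear").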
n, c, h, w = te.size_var("n"), 16, 32, 32
scale_h = 2.0
scale_w = 2.0
dtype = "float32"
def get_shape():
if layout == "NCHW":
return (c, h, w), (c, int(round(h * scale_h)), int(round(w * scale_w)))
else:
return (h, w, c), (int(round(h * scale_h)), int(round(w * scale_w)), c)
ishape, oshape = get_shape()
x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
y = relay.nn.upsampling(
x,
scale_h=scale_h,
scale_w=scale_w,
layout=layout,
method=method,
align_corners=align_corners,
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
dshape = (1,) + ishape
x = relay.var("x", shape=dshape)
y = relay.nn.upsampling(
x,
scale_h=scale_h,
scale_w=scale_w,
layout=layout,
method=method,
align_corners=align_corners,
)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref = tvm.topi.testing.resize2d_python(
data,
(scale_h, scale_w),
layout,
method[2:] if method[0:2] == "bi" else method,
"align_corners" if align_corners else "asymmetric",
)
for target, dev in tvm.testing.enabled_targets():
out = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
tvm.testing.assert_allclose(out.numpy(), ref, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_upsampling():
_test_upsampling("NCHW", "nearest_neighbor")
_test_upsampling("NCHW", "bilinear", True)
_test_upsampling("NHWC", "nearest_neighbor")
_test_upsampling("NHWC", "bilinear", True)
def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixel"):
n, c, d, h, w = te.size_var("n"), 8, 16, 16, 16
scale_d = 2.0
scale_h = 2.0
scale_w = 2.0
dtype = "float32"
def get_shape():
if layout == "NCDHW":
return (c, d, h, w), (
c,
int(round(d * scale_d)),
int(round(h * scale_h)),
int(round(w * scale_w)),
)
else:
return (d, h, w, c), (
int(round(d * scale_d)),
int(round(h * scale_h)),
int(round(w * scale_w)),
c,
)
ishape, oshape = get_shape()
x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
y = relay.nn.upsampling3d(
x,
scale_d=scale_d,
scale_h=scale_h,
scale_w=scale_w,
layout=layout,
method=method,
coordinate_transformation_mode=coordinate_transformation_mode,
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
dshape = (1,) + ishape
x = relay.var("x", shape=dshape)
y = relay.nn.upsampling3d(
x,
scale_d=scale_d,
scale_h=scale_h,
scale_w=scale_w,
layout=layout,
method=method,
coordinate_transformation_mode=coordinate_transformation_mode,
)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref = tvm.topi.testing.resize3d_python(
data,
(scale_d, scale_h, scale_w),
layout,
method[3:] if method[0:3] == "tri" else method,
coordinate_transformation_mode,
)
for target, dev in tvm.testing.enabled_targets():
out = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
tvm.testing.assert_allclose(out.numpy(), ref, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_upsampling3d():
_test_upsampling3d("NCDHW", "nearest_neighbor", "asymmetric")
_test_upsampling3d("NCDHW", "trilinear", "align_corners")
_test_upsampling3d("NDHWC", "nearest_neighbor", "asymmetric")
_test_upsampling3d("NDHWC", "trilinear", "align_corners")
@tvm.testing.requires_x86
@pytest.mark.skipif(tvm.target.codegen.llvm_version_major() < 8, reason="Requires LLVM 8")
class TestConv2DInt8Intrinsics:
supported_targets = [
"llvm -mcpu=nehalem",
"llvm -mcpu=core-avx2",
"llvm -mcpu=skylake-avx512",
"llvm -mcpu=cascadelake",
]
unsupported_targets = [
"llvm -mcpu=x86-64",
]
data_layout, kernel_layout = tvm.testing.parameters(
("NCHW", "OIHW"),
)
input_channels, output_channels = tvm.testing.parameters(
(1, 16),
(4, 16),
(6, 16),
(8, 4),
(8, 16),
(8, 20),
(17, 29),
)
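# Expected fast int8 instruction per -mcpu: pmaddubs(w) for nehalem/core-avx2/
# skylake-avx512, vpdpbusd (VNNI) for cascadelake.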
@tvm.testing.fixture
def fast_int8_intrinsic(self, target):
if "nehalem" in target or "core-avx2" in target or "skylake-avx512" in target:
return "pmaddubs"
elif "cascadelake" in target:
return "vpdpbusd"
else:
assert False, "Target should be Skylake or Cascadelake"
@tvm.testing.fixture
def assembly(
self,
target,
dtypes,
input_channels,
output_channels,
data_layout,
kernel_layout,
):
input_dtype, weight_dtype, output_dtype = dtypes
image_size = (64, 64)
kernel_size = (3, 3)
batch_size = 1
h, w = image_size
if data_layout == "NCHW":
data_shape = (batch_size, input_channels, *image_size)
elif data_layout == "NHWC":
data_shape = (batch_size, *image_size, input_channels)
else:
raise ValueError(f"Unsupported data layout: {data_layout}")
x = relay.var("x", relay.TensorType(data_shape, input_dtype))
if kernel_layout == "OIHW":
kernel_shape = (output_channels, input_channels, *kernel_size)
elif kernel_layout == "HWIO":
kernel_shape = (*kernel_size, input_channels, output_channels)
else:
raise ValueError("Not supported")
weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
y = relay.nn.conv2d(
x,
weight,
kernel_size=kernel_size,
channels=output_channels,
padding=(0, 0, 0, 1),
dilation=(1, 1),
data_layout=data_layout,
kernel_layout=kernel_layout,
out_dtype=output_dtype,
)
func = relay.Function([x, weight], y)
wdata = np.random.rand(*kernel_shape) * 10
parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(func, target, params=parameters)
return lib.get_source("asm")
@tvm.testing.parametrize_targets(*supported_targets)
@pytest.mark.parametrize(
"dtypes",
[
("uint8", "int8", "int32"),
("int8", "int8", "int32"),
],
)
def test_uses_intrinsic(
self,
fast_int8_intrinsic,
assembly,
):
assert fast_int8_intrinsic in assembly
@tvm.testing.parametrize_targets(*supported_targets)
@pytest.mark.parametrize("dtypes", [("uint8", "uint8", "int32")])
def test_no_intrinsic(
self,
fast_int8_intrinsic,
assembly,
):
assert fast_int8_intrinsic not in assembly
@tvm.testing.parametrize_targets(*unsupported_targets)
@pytest.mark.parametrize("dtypes", [("uint8", "int8", "int32")])
def test_uses_vectorized_instruction(self, assembly):
assert "pmulhw" in assembly and "paddd" in assembly
@tvm.testing.uses_gpu
def test_depthwise_conv2d_int8():
input_dtype = "uint8"
weight_dtype = "int8"
output_dtype = "int32"
data_shape = (1, 64, 56, 56)
x = relay.var("x", relay.TensorType(data_shape, input_dtype))
kernel_shape = (64, 1, 3, 3)
weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
y = relay.nn.conv2d(
x,
weight,
kernel_size=(3, 3),
groups=64,
padding=(1, 1),
dilation=(1, 1),
out_dtype=output_dtype,
)
func = relay.Function([x, weight], y)
wdata = np.random.rand(*kernel_shape) * 10
parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
llvm_version = tvm.target.codegen.llvm_version_major()
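# Compile-only check: verify that int8 depthwise conv2d builds for AVX512
# targets on LLVM 8+; the compiled module is not executed here.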
for target in targets:
if llvm_version >= 8:
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(func, target, params=parameters)
@tvm.testing.uses_gpu
def test_bitserial_conv2d_infer_type():
n, c, h, w = te.size_var("n"), 32, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "int16"))
w = relay.var("w", relay.ty.TensorType((32, 32, 3, 3), "int16"))
y = relay.nn.bitserial_conv2d(x, w, kernel_size=(3, 3), padding=(0, 0), channels=32)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 32, 222, 222), "int16")
@tvm.testing.uses_gpu
def test_bitpack_infer_type():
o, i, h, w = 32, 32, 128, 128
x = relay.var("x", relay.ty.TensorType((o, i, h, w), "int16"))
y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type="uint16", bits=1)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((32, 2, 128, 128, 1), "uint16")
@tvm.testing.uses_gpu
def test_correlation():
def _test_correlation(
data_shape,
kernel_size,
max_displacement,
stride1,
stride2,
padding,
is_multiply,
dtype="float32",
):
data1 = relay.var("data1", relay.ty.TensorType(data_shape, dtype))
data2 = relay.var("data2", relay.ty.TensorType(data_shape, dtype))
y = relay.nn.correlation(
data1,
data2,
kernel_size,
max_displacement,
stride1,
stride2,
padding,
is_multiply,
"NCHW",
)
yy = run_infer_type(y)
padded_height = data_shape[2] + 2 * padding
padded_width = data_shape[3] + 2 * padding
border_size = (kernel_size - 1) // 2 + max_displacement
displacement_radius = max_displacement // stride2
out_channel = ((2 * displacement_radius) + 1) ** 2
out_height = (padded_height - 2 * border_size + stride1 - 1) // stride1
out_width = (padded_width - 2 * border_size + stride1 - 1) // stride1
assert yy.checked_type == relay.TensorType(
(data_shape[0], out_channel, out_height, out_width), dtype
)
func = relay.Function([data1, data2], y)
data1_np = np.random.uniform(size=data_shape).astype(dtype)
data2_np = np.random.uniform(size=data_shape).astype(dtype)
ref_res = tvm.topi.testing.correlation_nchw_python(
data1_np,
data2_np,
kernel_size,
max_displacement,
stride1,
stride2,
padding,
is_multiply,
)
for target, dev in tvm.testing.enabled_targets():
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
data1_np, data2_np
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_correlation(
(1, 3, 10, 10),
kernel_size=1,
max_displacement=4,
stride1=1,
stride2=1,
padding=4,
is_multiply=True,
)
_test_correlation(
(1, 3, 10, 10),
kernel_size=1,
max_displacement=5,
stride1=1,
stride2=1,
padding=5,
is_multiply=True,
)
_test_correlation(
(5, 1, 4, 4),
kernel_size=3,
max_displacement=1,
stride1=2,
stride2=1,
padding=2,
is_multiply=True,
)
_test_correlation(
(5, 1, 6, 4),
kernel_size=3,
max_displacement=1,
stride1=2,
stride2=2,
padding=2,
is_multiply=False,
)
_test_correlation(
(5, 1, 11, 11),
kernel_size=5,
max_displacement=1,
stride1=1,
stride2=1,
padding=2,
is_multiply=False,
)
@pytest.mark.skip("Requires GFX10 AMDGPU")
def test_conv2d_rocm_sdot4():
d_shape = (1, 64, 56, 56)
w_shape = (64, 64, 3, 3)
padding = (1, 1)
strides = (1, 1)
data_dtype = "int8"
weight_dtype = "int8"
out_dtype = "int32"
data = relay.var("data", shape=d_shape, dtype=data_dtype)
weight = relay.var("weight", shape=w_shape, dtype=weight_dtype)
out_channel = w_shape[0]
conv2d = relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
strides=strides,
out_dtype=out_dtype,
)
mod = tvm.IRModule.from_expr(conv2d)
data_np = np.random.uniform(1, 10, d_shape).astype("int8")
weight_np = np.random.uniform(1, 10, size=w_shape).astype("int8")
target = "rocm -mattr=+dotprod"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params={"weight": weight_np})
asm = lib.lib.imported_modules[0].get_source("asm")
assert "v_dot4_i32_i8" in asm
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = tvm.topi.testing.conv2d_nchw_python(
data_np.astype("int32"), weight_np.astype("int32"), strides, padding
)
np.testing.assert_equal(out, ref)
def np_float2tvm_bf16(arr):
"""Convert a numpy array of float to a TVM array
of bf16"""
orig = arr.view("<u4")
bias = np.bitwise_and(np.right_shift(orig, 16), 1) + 0x7FFF
nparr = np.right_shift(orig + bias, 16).astype("uint16")
return tvm.nd.empty(nparr.shape, "bfloat16").copyfrom(nparr)
def np_bf162np_float(arr):
"""Convert a numpy array of bf16 (uint16) to a numpy array
of float"""
u32 = np.left_shift(arr.astype("uint32"), 16)
return u32.view("<f4")
@tvm.testing.requires_x86
def test_conv2d_nchw_dnnl():
if not tvm.get_global_func("tvm.contrib.dnnl.conv2d", allow_missing=True):
print(
"skip because extern dnnl function is not available, \
built with dnnl=ON"
)
return
d_shape = (1, 64, 56, 56)
w_shape = (64, 64, 3, 3)
padding = (1, 1)
strides = (1, 1)
def get_subgraph(dtype):
data = relay.var("data", shape=d_shape, dtype=dtype)
weight = relay.var("weight", shape=w_shape, dtype=dtype)
out_channel = w_shape[0]
conv2d = relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
strides=strides,
out_dtype=dtype,
)
return conv2d
for t in ["float32", "bfloat16"]:
mod = tvm.IRModule.from_expr(get_subgraph(t))
data_np = np.random.uniform(1, 10, d_shape).astype("float32")
weight_np = np.random.uniform(1, 10, size=w_shape).astype("float32")
ref = tvm.topi.testing.conv2d_nchw_python(data_np, weight_np, strides, padding)
if t == "bfloat16":
data_np = np_float2tvm_bf16(data_np)
weight_np = np_float2tvm_bf16(weight_np)
target = "llvm -mcpu=skylake-avx512 -libs=dnnl"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params={"weight": weight_np})
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
if t == "bfloat16":
out = np_bf162np_float(out)
np.testing.assert_allclose(out, ref, rtol=1e-2)
else:
np.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
@tvm.testing.requires_x86
def test_conv2d_nhwc_dnnl():
if not tvm.get_global_func("tvm.contrib.dnnl.conv2d", allow_missing=True):
print(
"skip because extern dnnl function is not available, \
built with dnnl=ON"
)
return
d_shape = (1, 56, 56, 64)
w_shape = (3, 3, 64, 64)
padding = (1, 1)
strides = (1, 1)
def get_subgraph(dtype):
data = relay.var("data", shape=d_shape, dtype=dtype)
weight = relay.var("weight", shape=w_shape, dtype=dtype)
out_channel = w_shape[3]
conv2d = relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[:2],
channels=out_channel,
padding=padding,
strides=strides,
out_dtype=dtype,
data_layout="NHWC",
kernel_layout="HWIO",
)
return conv2d
for t in ["float32", "bfloat16"]:
mod = tvm.IRModule.from_expr(get_subgraph(t))
data_np = np.random.uniform(1, 10, d_shape).astype("float32")
weight_np = np.random.uniform(1, 10, size=w_shape).astype("float32")
ref = tvm.topi.testing.conv2d_nhwc_python(data_np, weight_np, strides, padding)
if t == "bfloat16":
data_np = np_float2tvm_bf16(data_np)
weight_np = np_float2tvm_bf16(weight_np)
target = "llvm -mcpu=skylake-avx51 |
2 -libs=dnnl"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params={"weight": weight_np})
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
if t == "bfloat16":
out = np_bf162np_float(out)
np.testing.assert_allclose(out, ref, rtol=1e-2)
else:
np.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
def _test_conv2d_int8_alter_dtype(data_dtype, target, dot_product_instr):
def get_conv2d_nchw(
d_shape,
w_shape,
data_dtype,
):
out_dtype = "int32"
strides = (1, 1)
padding = (1, 1)
data = relay.var("data", shape=d_shape, dtype=data_dtype)
weight = relay.var("weight", shape=w_shape, dtype="int8")
out_channel = w_shape[0]
return relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
strides=strides,
out_dtype=out_dtype,
)
I, O, H, W = 64, 64, 56, 56
kH = kW = 3
data_shape = (1, I, H, W)
weight_shape = (O, I, kH, kW)
bias_shape = (1, weight_shape[0], 1, 1)
bias = relay.var("bias", shape=bias_shape, dtype="int32")
bias_np = np.random.randint(low=-127, high=128, size=bias_shape).astype("int32")
weight_np = np.random.uniform(-128, 127, size=weight_shape).astype("int8")
conv2d = get_conv2d_nchw(data_shape, weight_shape, data_dtype)
bias_add = relay.add(conv2d, bias)
mod = tvm.IRModule.from_expr(bias_add)
if data_dtype == "uint8":
data_np = np.random.uniform(0, 255, size=data_shape).astype("uint8")
else:
data_np = np.random.uniform(-128, 127, size=data_shape).astype("int8")
params = {"weight": weight_np, "bias": bias_np}
ref = (
relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
.evaluate()(*[data_np, weight_np, bias_np])
.numpy()
)
dev = tvm.cpu(0)
with tvm.transform.PassContext(
opt_level=3,
):
lib = relay.build(mod, target=target, params=params)
assert dot_product_instr in lib.lib.get_source("asm")
rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
rt_mod.set_input("data", data_np)
rt_mod.run()
out = rt_mod.get_output(0).numpy()
np.testing.assert_equal(out, ref)
@tvm.testing.requires_arm_dot
def test_conv2d_int8_alter_dtype_arm():
_test_conv2d_int8_alter_dtype(
"uint8", "llvm -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod", "sdot"
)
@tvm.testing.requires_cascadelake
def test_conv2d_int8_alter_dtype_vnni():
_test_conv2d_int8_alter_dtype("int8", "llvm -mcpu=cascadelake", "vpdpbusd")
if __name__ == "__main__":
tvm.testing.main()
""" Support level3 operator test cases.
""" |
import sys
from typing import Callable, Optional
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.error import TVMError
from tvm.relay import create_executor, transform
from tvm.relay.testing import check_grad, run_infer_type
from utils import ref_funcs
executor_kind = tvm.testing.parameter("graph", "vm")
class TestZerosOnes:
config = {"zeros": (relay.zeros, np.zeros), "ones": (relay.ones, np.ones)}
op, ref = tvm.testing.parameters(*config.values(), ids=config.keys())
def test_zeros_ones(self, op, ref):
y = op(shape=(124, 50), dtype="float64")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((124, 50), "float64")
intrp_res = create_executor().evaluate(y).numpy()
np.testing.assert_allclose(intrp_res, ref((124, 50), "float64"))
class TestUnaryIdentity:
config = {
"zeros_like": (relay.zeros_like, np.zeros_like),
"ones_like": (relay.ones_like, np.ones_like),
"ceil": (relay.ceil, np.ceil),
"floor": (relay.floor, np.floor),
"trunc": (relay.trunc, np.trunc),
"round": (relay.round, np.round),
"abs": (relay.abs, np.abs),
"copy": (relay.copy, None),
"negative": (relay.negative, np.negative),
"sign": (relay.sign, np.sign),
}
op, ref = tvm.testing.parameters(*config.values(), ids=config.keys())
def test_unary_identity(self, op, ref):
shape = (8, 9, 4)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "float32")
if ref is not None:
data = np.random.rand(*shape).astype("float32")
op_res = create_executor().evaluate(y, {x: relay.const(data)})
ref_res = ref(data)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_cast():
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = x.astype("int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = relay.cast(x, "int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext |
()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
def test_sliding_window():
x = relay.var("x", relay.TensorType((2, 3, 32, 32), "float32"))
y = relay.sliding_window(x, 1, [3, 4, 5], [1, 2, 3])
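# Window shape (3, 4, 5) with strides (1, 2, 3) starting at axis 1: each
# strided axis contributes floor((dim - window) / stride) + 1 positions,
# i.e. (3-3)/1+1 = 1, (32-4)/2+1 = 15, (32-5)/3+1 = 10, followed by the
# window dimensions themselves.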
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((2, 1, 15, 10, 3, 4, 5), "float32")
data = np.random.rand(2, 3, 32, 32).astype("float32")
intrp = create_executor()
result = intrp.evaluate(y, {x: relay.const(data)})
result_np = result.numpy()
assert result_np.shape == (2, 1, 15, 10, 3, 4, 5)
assert np.array_equal(result_np[0, 0, 0, 0, :, :, :], data[0, :, 0:4, 0:5])
assert np.array_equal(result_np[1, 0, 7, 3, :, :, :], data[1, :, 14:18, 9:14])
assert np.array_equal(result_np[1, 0, 14, 9, :, :, :], data[1, :, 28:32, 27:32])
def test_clip():
a = relay.var("a", relay.TensorType((10, 4), "float32"))
y = relay.clip(a, 1.0, 4.0)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "float32")
data = np.random.rand(10, 4).astype("float32")
op_res = create_executor().evaluate(y, {a: relay.const(data)})
ref_res = np.clip(data, 1.0, 4.0)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_fixed_point_multiply():
a = relay.var("a", relay.TensorType((10, 4), "int32"))
y = relay.fixed_point_multiply(a, 1073741824, -3)
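# 1073741824 == 2**30 encodes 0.5 as a Q31 fixed-point multiplier; combined
# with shift == -3 the overall scale is 0.5 * 2**-3 == 1/16, so the input
# of 23 rounds to the expected ones.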
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "int32")
data = 23 * np.ones((10, 4)).astype("int32")
op_res = create_executor().evaluate(y, {a: relay.const(data)})
ref_res = np.ones((10, 4)).astype("int32")
np.testing.assert_allclose(op_res.numpy(), ref_res, atol=1)
def test_reinterpret():
a = relay.var("a", relay.TensorType((1000, 4), "float32"))
y = relay.reinterpret(a, "int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000, 4), "int32")
data = np.random.randn(1000, 4).astype("float32") * 1000
op_res = create_executor().evaluate(y, {a: relay.const(data)})
ref_res = data.view("int32")
np.testing.assert_equal(op_res.numpy(), ref_res)
def test_approximate_transcendental():
def C(x):
return relay.expr.const(x, "float32")
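# approx_exp evaluates e**x as 2**(x * log2(e)): it adds the float32
# exponent bias (127) up front, splits off the integer part, approximates
# 2**frac with a cubic polynomial, and builds the power-of-two factor by
# shifting the biased integer into the exponent bits via reinterpret.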
def approx_exp(x):
x = relay.minimum(relay.maximum(x, C(-88.0)), C(88.0))
x = C(127.0) + x * C(1.44269504)
xf = relay.floor(x)
i = relay.cast(xf, "int32")
x = x - xf
Y = C(0.99992522) + x * (C(0.69583354) + x * (C(0.22606716) + x * C(0.078024523)))
exponent = relay.left_shift(i, relay.expr.const(23, "int32"))
exponent = relay.reinterpret(exponent, "float32")
return exponent * Y
def approximate_sigmoid(x):
y = approx_exp(x)
return y / (y + C(1.0))
def approximate_tanh(x):
x = x * C(2.0)
y = approx_exp(x)
return (y - C(1.0)) / (y + C(1.0))
a = relay.var("a", relay.TensorType((1000,), "float32"))
y = approximate_sigmoid(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
op_res = create_executor().evaluate(y, {a: relay.const(data)})
def reference_sigmoid(x):
return np.exp(-np.logaddexp(0, -x))
np.testing.assert_allclose(op_res.numpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9)
y = approximate_tanh(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
op_res = create_executor().evaluate(y, {a: relay.const(data)})
def reference_tanh(x):
return np.tanh(x)
np.testing.assert_allclose(op_res.numpy(), reference_tanh(data), atol=4e-5, rtol=1e-9)
class TestSqueeze:
shape, dtype, axis = tvm.testing.parameters(
((1, 3, 2, 5), "float32", None),
((1, 3, 1), "float32", [0]),
((1, 2, 1, 2, 1), "float32", [0, 2]),
)
def test_squeeze(self, shape, dtype, axis):
x = relay.var("x", relay.TensorType(shape, dtype))
squeeze = relay.squeeze(x, axis=axis)
np_axis = tuple(axis) if axis is not None else None
data = np.random.random_sample(shape).astype(dtype)
op_res = create_executor().evaluate(squeeze, {x: relay.const(data)})
ref_res = np.squeeze(data, axis=np_axis)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_transpose_infer_type():
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.transpose(x, axes=(1, 0, 2))
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((t, n, 100), "float32")
y = relay.transpose(x)
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((100, t, n), "float32")
def test_transpose(target, dev, executor_kind):
dshape = (2, 3, 4)
axes = (0, 2, 1)
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.transpose(x, axes=axes)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.transpose(x_data, axes=axes)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_squeeze_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(2,))
assert "axis=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 4), "float32")
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x)
assert "axis=" not in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((4,), "float32")
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_squeeze_bad_axes_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(1,))
yy = run_infer_type(y)
def test_reshape_infer_type():
n, t, d1, d2 = 10, 20, 100, 20
x = relay.var("x", relay.TensorType((n, t, d1, d2), "float32"))
y = relay.reshape(x, newshape=(n, t, 2000))
assert "newshape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, t, 2000), "float32")
class TestReshape:
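# newshape uses Relay's reshape codes: 0 copies the input dim, -1 infers a
# dim, -2 copies all remaining dims, -3 merges two adjacent dims, and -4
# splits one dim in two (one of the two may be -1).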
shape, newshape, oshape = tvm.testing.parameters(
((2, 3, 4), (8, 3), (8, 3)),
((4, 7), (2, 7, 2), (2, 7, 2)),
((2, 3, 4), (4, 0, 2), (4, 3, 2)),
((2, 3, 4), (2, 0, 0), (2, 3, 4)),
((2, 3, 4), (0, -1), (2, 12)),
((2, 3, 4), (-1, 0), (8, 3)),
((2, 3, 4), (2, -2), (2, 3, 4)),
((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1)),
((2, 3, 4), (-3, 4), (6, 4)),
((2, 3, 4, 5), (-3, -3), (6, 20)),
((2, 3, 4), (0, -3), (2, 12)),
((2, 3, 4), (-3, -2), (6, 4)),
((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4)),
((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4)),
((1,), (), ()),
)
def test_reshape(self, target, dev, executor_kind, shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.reshape(x, newshape=newshape)
zz = run_infer_type(z)
assert "newshape=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
func = relay.Function([x], z)
check_grad(func)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_reshape_fail():
with pytest.raises(TVMError) as reshape_err:
x = relay.var("x", relay.TensorType([2, 3], "float32" |
))
z = relay.reshape(x, [7])
zz = run_infer_type(z)
def test_reshape_like_infer_type():
x = relay.var("x", relay.TensorType((1, 2, 3), "float32"))
y = relay.var("y", relay.TensorType((1, 6), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 6), "float32")
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.var("y", relay.TensorType((1, 8, 8), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 8, 8), "float32")
x = relay.var("x", relay.TensorType((1, 2, 3, 4), "float32"))
y = relay.var("y", relay.TensorType((1, 6, 5), "float32"))
z = relay.reshape_like(x, y, lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 6, 4), "float32")
x = relay.var("x", relay.TensorType((1, 2, 3, 4), "float32"))
y = relay.var("y", relay.TensorType((2, 3, 4, 1, 6), "float32"))
z = relay.reshape_like(x, y, rhs_end=3)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((2, 3, 4), "float32")
z = relay.reshape_like(x, y, rhs_begin=2)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((4, 1, 6), "float32")
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.var("y", relay.TensorType((5, 6), "float32"))
z = relay.var("z", relay.TensorType((4,), "float32"))
w = relay.reshape_like(x, y, lhs_end=3)
w = relay.reshape_like(w, z, lhs_begin=2)
w = run_infer_type(w)
assert w.checked_type == relay.TensorType((5, 6, 4), "float32")
class TestReshapeLike:
shape, oshape, shape_like, reshape_like_kwargs = tvm.testing.parameters(
((2, 3, 4), (1, 8, 3), None, {}),
((4, 7), (2, 7, 2), None, {}),
((1, 2, 3, 4), (1, 6, 4), (1, 6, 5), dict(lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2)),
)
def test_reshape_like(
self, target, dev, executor_kind, shape, oshape, shape_like=None, reshape_like_kwargs={}
):
if shape_like is None:
shape_like = oshape
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=shape_like).astype("float32")
ref_res = np.reshape(x_data, oshape)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("x", relay.TensorType(shape_like, "float32"))
z = relay.reshape_like(x, y, **reshape_like_kwargs)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32")
func = relay.Function([x, y], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestTakeInferType:
d1, d2, d3 = te.var("d1"), te.var("d2"), te.var("d3")
d4, d5, d6 = te.var("d4"), te.var("d5"), te.var("d6")
dshape, indices_shape, oshape, axis = tvm.testing.parameters(
((d1,), (1,), (1,), 0),
((4,), (d1, d2), (d1, d2), None),
((3, 3, 3), (1, d2), (1, d2), None),
((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0),
((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1),
((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2),
)
def test_take(self, dshape, indices_shape, oshape, axis):
x = relay.var("x", relay.TensorType(dshape, "float32"))
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
y = relay.take(x, indices, axis=axis)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(oshape, "float32")
class TestTake:
src_shape, indices_src, axis, mode, indices_dtype = tvm.testing.parameters(
((4,), [1], None, "clip", "int32"),
((4,), [[0, 1, 2, 3]], None, "clip", "int32"),
((3, 3, 3), [[11, 25]], None, "clip", "int32"),
((4,), [[0, 1], [2, 3]], None, "clip", "int32"),
((4,), [1], 0, "clip", "int32"),
((2, 2), [[[1, 0], [0, 1]]], 0, "clip", "int32"),
((2, 2), [[[1, 0], [0, 1]]], 1, "clip", "int32"),
((4, 3, 5, 6), [[2, 1, 0, 0]], -2, "clip", "int32"),
((3, 4), [-5, 20], None, "clip", "int32"),
((3, 4), [-5, 20], None, "wrap", "int32"),
((3, 4), [-1, 2], 0, "clip", "int32"),
((3, 4), [-1, 2], 0, "wrap", "int32"),
((3, 4), [-1, 2], 1, "clip", "int32"),
((3, 4), [-1, 2], 1, "wrap", "int32"),
((3, 3, 3), [[11, 25]], None, "fast", "int32"),
((3, 4), [0, 2], 0, "fast", "int32"),
((3, 4), [0, 2], 1, "fast", "int32"),
((3, 4), [1, 2], 1, "clip", "uint32"),
((3, 4), [1, 2], 1, "wrap", "uint16"),
((3, 3, 3), [1, 2], None, "fast", "uint16"),
((3, 4), [0, 2], 0, "fast", "uint8"),
)
@tvm.testing.known_failing_targets("vulkan")
def test_take(
self, target, dev, executor_kind, src_shape, indices_src, axis, mode, indices_dtype
):
src_dtype = "float32"
indices_src = np.array(indices_src, dtype=indices_dtype)
x = relay.var("x", relay.TensorType(src_shape, src_dtype))
indices = relay.var("indices", relay.TensorType(indices_src.shape, indices_dtype))
z = relay.take(x, indices, axis=axis, mode=mode)
func = relay.Function([x, indices], z)
x_data = np.random.uniform(low=-1, high=1, size=src_shape).astype(src_dtype)
np_mode = "raise" if mode == "fast" else mode
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, indices_src
)
indices_src = indices_src.astype("int32")
ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestSplitInferType:
idxd = tvm.tir.indexdiv
d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
axis = te.var("axis")
dshape, indices_or_sections, ret_type, axis = tvm.testing.parameters(
(
(5, 5, 2, 2),
5,
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
]
)
),
1,
),
(
(5, 5, 2, 2),
5,
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
]
)
),
0,
),
(
(d1, d2, d3, d4),
4,
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
]
)
),
2,
),
(
(d1, d2, d3, d4),
2,
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
]
)
),
0,
),
(
(d1, d2, d3, d4),
(2, 4, 7),
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 3, d3, d4), "float32"),
relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"),
]
)
),
1,
),
(
(d1, d2, d3, d4),
tuple(np.array([2, 4, 7]).astype(np.int64)),
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 3, d3, d4), "float32"),
relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"),
]
)
),
1,
),
)
def test_split(self, dshape, indices_or_sections, ret_type, axis):
x = relay.var("x", relay.ty.TensorType(dshape, "float32"))
y = relay.split(x, indices_or_sections, axis=axis)
yy = run_infer_type(y.astuple())
assert yy.checked_type == ret_type
def test_full_infer_type():
x = relay.var("x", relay.TensorType((), "int8"))
y = relay.full(x, ())
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((), "int8")
x = relay.var("x", relay.TensorType((), "float32"))
y = relay.full(x, (1, 2), "int8")
"shape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2), "int8")
class TestFull:
fill_value, arr_shape, dtype = tvm.testing.parameters(
(4, (1, 3, 4, 4), "int32"),
(4, (1, 3, 4, 4), "int64"),
(4.0, (1, 4), "float32"),
)
def test_full(self, target, dev, executor_kind, fill_value, arr_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
z = relay.full(x, arr_shape, dtype)
func = relay.Function([x], z)
ref_res = np.full(arr_shape, fill_value, dtype=dtype)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
np.array(fill_value, dtype)
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_full_like(self, target, dev, executor_kind, arr_shape, fill_value, dtype):
x_data = np.random.uniform(low=-1, high=1, size=arr_shape).astype(dtype)
x = relay.var("x", relay.TensorType(arr_shape, dtype))
y = relay.var("y", relay.scalar_type(dtype))
z = relay.full_like(x, y)
func = relay.Function([x, y], z)
ref_res = np.full_like(x_data, fill_value)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, np.array(fill_value, dtype)
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_full_like_infer_type():
base = relay.var("base", relay.TensorType((1, 2, 3), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2, 3), "float32")
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
base = relay.var("base", relay.TensorType((n, c, h, w), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
def test_infer_type_leaky_relu(target, dev, executor_kind):
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.leaky_relu(x, alpha=0.1)
"alpha=0.1" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.leaky_relu(x, alpha=0.1)
assert "alpha=0.1" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = np.where(x_data > 0, x_data, x_data * 0.1)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestInferTypePrelu:
dtype = tvm.testing.parameter("float32")
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
data, alpha, axis, output = tvm.testing.parameters(
((n, c, h, w), (c,), 1, (n, c, h, w)),
((n, h, w, c), (c,), 3, (n, h, w, c)),
((n, c, h, w), None, 1, (n, c, h, w)),
((n, h, w, c), None, 3, (n, h, w, c)),
((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2)),
((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3)),
((1, 3, 2, 2), None, 1, (1, 3, 2, 2)),
((1, 2, 2, 3), None, 3, (1, 2, 2, 3)),
)
def test_infer_type_prelu(self, target, dev, executor_kind, data, alpha, axis, output, dtype):
x = relay.var("data", relay.TensorType(data, dtype))
if alpha:
y = relay.var("alpha", relay.TensorType(alpha, dtype))
else:
y = relay.var("alpha", relay.IncompleteType())
z = relay.nn.prelu(x, y, axis=axis)
zz = run_infer_type(z)
if axis != 1:
assert "axis" in z.astext()
assert zz.checked_type == relay.ty.TensorType(output, dtype)
if not alpha:
axis = axis if axis else 1
alpha_shape = (data[axis],)
assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")
if all(isinstance(v, tvm.tir.Var) for v in data) or not alpha:
return
func = relay.Function([x, y], z)
x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)
if axis == 1:
ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data >= 0) * x_data
else:
ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data >= 0) * x_data
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, a_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestArange:
dtype = tvm.testing.parameter("float32")
start, stop, step = tvm.testing.parameters(
(None, 20, None),
(None, 20, 2),
(1, 20, None),
(1, 20, 2),
(1, 20.5, None),
(1, 20, 3),
(20, 1, -1),
)
def test_arange(self, target, dev, executor_kind, start, stop, step, dtype):
if start is None and step is None:
x = relay.arange(relay.const(stop, dtype=dtype))
ref_res = np.arange(stop).astype(dtype)
elif start is None:
x = relay.arange(relay.const(stop, dtype=dtype), step=relay.const(step, dtype=dtype))
ref_res = np.arange(stop, step=step).astype(dtype)
elif step is None:
x = relay.arange(relay.const(start, dtype=dtype), relay.const(stop, dtype=dtype))
ref_res = np.arange(start, stop).astype(dtype)
else:
x = relay.arange(
relay.const(start, dtype=dtype),
relay.const(stop, dtype=dtype),
relay.const(step, dtype=dtype),
)
ref_res = np.arange(start, stop, step).astype(dtype)
func = relay.Function([], x)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)()
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestMeshgrid:
lengths, indexing = tvm.testing.parameters(
([3, 5], "ij"),
([4, 2], "xy"),
([3, 5, 2], "ij"),
([3, 1, 5], "xy"),
([3, 5, 0], "ij"),
)
def test_meshgrid(self, target, dev, executor_kind, lengths, indexing="ij"):
input_vars = []
input_data = []
for i, length in enumerate(lengths):
input_name = "x_{}".format(i)
if length == 0:
input_vars.append(relay.var(input_name, relay.scalar_type("float32")))
input_data.append(np.array(1, "float32"))
else:
input_vars.append(relay.var(input_name, relay.TensorType((length,), "float32")))
input_data.append(np.arange(length).astype("float32"))
z = relay.meshgrid(input_vars, indexing=indexing).astuple()
func = relay.Function(input_vars, z)
ref_res = np.meshgrid(*input_data, indexing=indexing)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
*input_data
)
assert len(op_res) == len(ref_res)
for i in range(len(op_res)):
tvm.testing.assert_allclose(op_res[i].numpy(), ref_res[i], rtol=1e-5)
class TestTile:
dshape, reps = tvm.testing.parameters(
((2, 3, 4), (3, 2, 1)),
((2, 3, 4), (1, 2)),
((2, 3), (3, 2, 1)),
)
def test_tile(self, target, dev, executor_kind, dshape, reps):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.tile(x, reps=reps)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.tile(x_data, reps=reps)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestRepeat:
dshape, repeats, axis = tvm.testing.parameters(
((3,), 2, 0),
((3, 10), 2, -1),
((3, 2, 4), 3, 1),
)
def test_repeat(self, target, dev, executor_kind, dshape, repeats, axis):
x = relay.Var("x", relay.TensorType(dshape, "float32"))
func = relay.Function([x], relay.repeat(x, repeats, axis))
data = np.random.uniform(size=dshape).astype("float32")
ref_res = np.repeat(data, repeats, axis)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestStack:
dshapes, axis = tvm.testing.parameters(
([(2,), (2,), (2,)], -1),
([(2,), (2,), (2,)], 0),
([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1),
([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1),
([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4),
)
expr_type = tvm.testing.parameter("tuple", "list", "tuple_expr")
@tvm.testing.fixture
def ref_data(self, dshapes, axis):
np_in = [np.random.normal(size=shape).astype("float32") for shape in dshapes]
np_out = np.stack(np_in, axis=axis)
return np_in, np_out
@tvm.testing.fixture
def input_expr(self, dshapes, axis, expr_type, ref_data):
input_vars = [relay.var("input", relay.TensorType(shape, "float32")) for shape in dshapes]
if expr_type == "tuple":
input_expr = relay.Tuple(input_vars)
elif expr_type == "list":
input_expr = input_vars
elif expr_type == "tuple_expr":
np_in, np_out = ref_data
x = relay.Var("x")
input_expr = relay.Let(x, relay.Tuple([relay.const(inp) for inp in np_in]), x)
else:
raise ValueError(f"Unknown expr_type '{expr_type}'")
return input_expr
def test_stack(self, target, dev, executor_kind, input_expr, ref_data, axis):
z = relay.stack(input_expr, axis=axis)
inp_vars = relay.analysis.free_vars(z)
func = relay.Function(inp_vars, z)
np_in, np_out = ref_data
relay_args = np_in if inp_vars else []
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
*relay_args
)
tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5)
class TestReverse:
dshape, axis = tvm.testing.parameters(
((2, 3, 4), 1),
((4, 7), 0),
((2, 3, 4), -1),
)
def test_reverse(self, target, dev, executor_kind, dshape, axis):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.reverse(x, axis=axis)
zz = run_infer_type(z)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.flip(x_data, axis)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_reverse_sequence(target, dev, executor_kind):
def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res):
seq_lengths_data = np.array(seq_lengths).astype("int32")
x = relay.var("x", relay.TensorType(x_data.shape, str(x_data.dtype)))
z = relay.reverse_sequence(x, relay.const(seq_lengths_data), seq_axis, batch_axis)
zz = run_infer_type(z)
assert zz.checked_type == x.type_annotation
func = relay.Function([x], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 5, 10, 15], [4, 1, 6, 11], [8, 9, 2, 7], [12, 13, 14, 3]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))
verify_reverse_sequence(
indata.astype("float32"), [1, 2, 3, 4], 1, 0, np.array(result).astype("float32")
)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3], [5, 4, 6, 7], [10, 9, 8, 11], [15, 14, 13, 12]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))
verify_reverse_sequence(
indata.astype("float32"), [1, 2, 3, 4], 0, 1, np.array(result).astype("float32")
)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [15, 14, 13, 12]]
verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [
[
[[18, 19, 20], [21, 22, 23], [24, 25, 26]],
[[9, 10, 11], [12, 13, 14], [15, 16, 17]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
],
[
[[45, 46, 47], [48, 49, 50], [51, 52, 53]],
[[36, 37, 38], [39, 40, 41], [42, 43, 44]],
[[27, 28, 29], [30, 31, 32], [33, 34, 35]],
],
]
verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [
[
[[9, 10, 11], [21, 22, 23], [15, 16, 17]],
[[0, 1, 2], [12, 13, 14], [6, 7, 8]],
[[18, 19, 20], [3, 4, 5], [24, 25, 26]],
],
[
[[36, 37, 38], [48, 49, 50], [42, 43, 44]],
[[27, 28, 29], [39, 40, 41], [33, 34, 35]],
[[45, 46, 47], [30, 31, 32], [51, 52, 53]],
],
]
verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = []
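# The expected message below deliberately keeps the "sequnece" misspelling
# emitted by the operator's shape checker.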
with pytest.raises(Exception) as execinfo:
verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))
assert (
"For reverse_sequnece seq_lengths size should match with dimension of batch axis,"
" but got dimension of batch_axis = 4, and seq_length size = 5" in execinfo.value.args[0]
)
def ref_scatter(data, indices, updates, axis=0):
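# NumPy reference for scatter: enumerate the full index grid of `indices`,
# substitute the scatter indices along `axis`, then write the updates.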
idx = np.indices(indices.shape).reshape(indices.ndim, -1)
updated_idx = np.copy(idx)
indices = indices.reshape(-1)
for i in range(len(indices)):
updated_idx[axis, i] = indices[i]
scattered = np.copy(data)
scattered[tuple(updated_idx)] = updates[tuple(idx)]
return scattered
def test_scatter(target, dev, executor_kind):
def verify_scatter(dshape, ishape, axis=0, indices_dtype="int64"):
d = relay.var("d", relay.TensorType(dshape, "float32"))
i = relay.var("i", relay.TensorType(ishape, indices_dtype))
u = relay.var("u", relay.TensorType(ishape, "float32"))
z = relay.op.scatter(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(0, dshape[axis] - 1, ishape).astype(indices_dtype)
ref_res = ref_scatter(data_np, indices_np, updates_np, axis)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data_np, indices_np, updates_np
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_scatter((10,), (10,), 0)
verify_scatter((10, 5), (10, 5), -2)
verify_scatter((10, 5), (10, 5), -1)
verify_scatter((10, 5), (3, 5), 0)
verify_scatter((12, 4), (7, 2), 1)
verify_scatter((2, 3, 4), (1, 3, 4), 0)
verify_scatter((2, 3, 4), (2, 1, 4), 1)
verify_scatter((2, 3, 4), (2, 3, 1), 2)
verify_scatter((4, 2, 1), (1, 1, 1), 0)
verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0)
verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1)
verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2)
verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3)
verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3, indices_dtype="uint32")
class TestDynamicScatter:
dshape, ishape, axis = tvm.testing.parameters(
((10,), (10,), 0),
((10, 5), (10, 5), -2),
((10, 5), (10, 5), -1),
((10, 5), (3, 5), 0),
((12, 4), (7, 2), 1),
((2, 3, 4), (1, 3, 4), 0),
((2, 3, 4), (2, 1, 4), 1),
((2, 3, 4), (2, 3, 1), 2),
((4, 2, 1), (1, 1, 1), 0),
((2, 3, 4, 5), (1, 3, 4, 5), 0),
((6, 3, 4, 5), (2, 3, 4, 5), 1),
((2, 3, 8, 5), (2, 3, 1, 1), 2),
((16, 16, 4, 5), (16, 16, 4, 5), 3),
)
@pytest.mark.parametrize("executor_kind", ["vm"])
def test_dynamic_scatter(self, target, dev, executor_kind, dshape, ishape, axis):
d = relay.var("d", relay.TensorType([relay.Any() for i in range(len(dshape))], "float32"))
i = relay.var("i", relay.TensorType([relay.Any() for i in range(len(ishape))], "int64"))
u = relay.var("u", relay.TensorType([relay.Any() for i in range(len(ishape))], "float32"))
z = relay.op.scatter(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64")
ref_res = ref_scatter(data_np, indices_np, updates_np, axis)
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(data_np, indices_np, updates_np)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestScatterAdd:
dshape, ishape, axis, dtype, indice_dtype = tvm.testing.parameters(
((10,), (10,), 0, "int32", "int64"),
((1000,), (1000,), 0, "int32", "int64"),
((10, 5), (10, 5), -2, "float32", "int64"),
((10, 5), (10, 5), -1, "float32", "int64"),
((10, 5), (3, 5), 0, "float32", "int64"),
((12, 4), (7, 2), 1, "float32", "int64"),
((2, 3, 4), (1, 3, 4), 0, "float32", "int64"),
((2, 3, 4), (2, 1, 4), 1, "float32", "int64"),
((2, 3, 4), (2, 3, 1), 2, "float32", "int64"),
((2, 3, 4, 5), (1, 3, 4, 5), 0, "float32", "int64"),
((6, 3, 4, 5), (2, 3, 4, 5), 1, "float32", "int64"),
((2, 3, 8, 5), (2, 3, 1, 1), 2, "float32", "int64"),
((16, 16, 4, 5), (16, 16, 4, 5), 3, "float32", "int64"),
((16, 16, 4, 5), (16, 16, 4, 5), 3, "float32", "uint32"),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(self, dshape, ishape, axis, dtype, indice_dtype):
data_np = np.random.uniform(size=dshape).astype(dtype)
updates_np = np.random.uniform(size=ishape).astype(dtype)
indices_np = np.random.randint(0, dshape[axis] - 1, ishape).astype(indice_dtype)
out_np = np.copy(data_np)
for index in np.ndindex(*indices_np.shape):
new_index = list(index)
new_index[axis] = indices_np[index]
out_np[tuple(new_index)] += updates_np[index]
return data_np, updates_np, indices_np, out_np
@tvm.testing.known_failing_targets("vulkan")
def test_scatter_add(self, target, dev, ref_data, dshape, ishape, axis, dtype, indice_dtype):
d = relay.var("d", relay.TensorType(shape=[relay.Any() for _ in dshape], dtype=dtype))
i = relay.var(
"i", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype=indice_dtype)
)
u = relay.var("u", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype=dtype))
z = relay.op.scatter_add(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np, updates_np, indices_np, out_np = ref_data
verify_func(target, dev, func, [data_np, indices_np, updates_np], out_np)
@pytest.mark.parametrize(
"data, axis, indices, ref_res",
[
([[1, 2], [3, 4]], 1, [[0, 0], [1, 0]], [[1, 1], [4, 3]]),
([[1, 2], [3, 4]], -1, [[0, 0], [1, 0]], [[1, 1], [4, 3]]),
(
[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],
0,
[[[1, 0, 1], [1, 1, 0]]],
[[[6, 1, 8], [9, 10, 5]]],
),
(
[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],
-3,
[[[1, 0, 1], [1, 1, 0]]],
[[[6, 1, 8], [9, 10, 5]]],
),
(
[
[
[-0.2321, -0.2024, -1.7624],
[-0.3829, -0.4246, 0.2448],
[0.1822, 0.2360, -0.8965],
[0.4497, -0.2224, 0.6103],
],
[
[0.0408, -0.7667, -0.4303],
[-0.3216, 0.7489, -0.1502],
[0.0144, -0.4699, -0.0064],
[-0.0768, -1.6064, 1.3390],
],
],
1,
[[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
[
[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
[[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],
],
),
(
[
[
[-0.2321, -0.2024, -1.7624],
[-0.3829, -0.4246, 0.2448],
[0.1822, 0.2360, -0.8965],
[0.4497, -0.2224, 0.6103],
],
[
[0.0408, -0.7667, -0.4303],
[-0.3216, 0.7489, -0.1502],
[0.0144, -0.4699, -0.0064],
[-0.0768, -1.6064, 1.3390],
],
],
-2,
[[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
[
[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
[[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],
],
),
(
[
[
[-0.2321, -0.2024, -1.7624],
[-0.3829, -0.4246, 0.2448],
[0.1822, 0.2360, -0.8965],
[0.4497, -0.2224, 0.6103],
],
[
[0.0408, -0.7667, -0.4303],
[-0.3216, 0.7489, -0.1502],
[0.0144, -0.4699, -0.0064],
[-0.0768, -1.6064, 1.3390],
],
],
-2,
[[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
[
[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
[[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],
],
),
(
[
[
[0.3050, 1.6986, 1.1034],
[0.7020, -0.6960, -2.1818],
[0.3116, -0.5773, -0.9912],
[0.0835, -1.3915, -1.0720],
],
[
[0.1694, -0.6091, -0.6539],
[-0.5234, -0.1218, 0.5084],
[0.2374, -1.9537, -2.0078],
[-0.5700, -1.0302, 0.1558],
],
],
2,
[
[[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],
[[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]],
],
[
[
[1.6986, 1.6986, 0.3050, 1.6986],
[0.7020, 0.7020, -2.1818, -2.1818],
[-0.5773, -0.9912, -0.5773, -0.9912],
[-1.0720, -1.0720, -1.3915, 0.0835],
],
[
[0.1694, 0.1694, -0.6091, -0.6539],
[0.5084, 0.5084, -0.1218, -0.5234],
[-1.9537, -2.0078, 0.2374, 0.2374],
[-0.5700, 0.1558, -0.5700, 0.1558],
],
],
),
(
[
[
[0.3050, 1.6986, 1.1034],
[0.7020, -0.6960, -2.1818],
[0.3116, -0.5773, -0.9912],
[0.0835, -1.3915, -1.0720],
],
[
[0.1694, -0.6091, -0.6539],
[-0.5234, -0.1218, 0.5084],
[0.2374, -1.9537, -2.0078],
[-0.5700, -1.0302, 0.1558],
],
],
-1,
[
[[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],
[[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]],
],
[
[
[1.6986, 1.6986, 0.3050, 1.6986],
[0.7020, 0.7020, -2.1818, -2.1818],
[-0.5773, -0.9912, -0.5773, -0.9912],
[-1.0720, -1.0720, -1.3915, 0.0835],
],
[
[0.1694, 0.1694, -0.6091, -0.6539],
[0.5084, 0.5084, -0.1218, -0.5234],
[-1.9537, -2.0078, 0.2374, 0.2374],
[-0.5700, 0.1558, -0.5700, 0.1558],
],
],
),
],
)
def test_gather(target, dev, executor_kind, data, axis, indices, ref_res):
def verify_gather(data, axis, indices, ref_res):
data = np.asarray(data, dtype="float32")
indices = np.asarray(indices, dtype="int32")
ref_res = np.asarray(ref_res)
d = relay.var("x", relay.TensorType(data.shape, "float32"))
i = relay.var("y", relay.TensorType(indices.shape, "int32"))
z = relay.gather(d, axis, i)
func = relay.Function([d, i], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data, indices
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_gather(data, axis, indices, ref_res)
def test_gather_nd(target, dev, executor_kind):
def verify_gather_nd(xshape, yshape, y_data, batch_dims=0, indices_dtype="int32"):
x = relay.var("x", relay.TensorType(xshape, "float32"))
y = relay.var("y", relay.TensorType(yshape, indices_dtype))
z = relay.gather_nd(x, y, batch_dims)
func = relay.Function([x, y], z)
x_data = np.random.uniform(size=xshape).astype("float32")
if y_data is not None:
y_data = np.array(y_data, dtype=indices_dtype)
else:
y_data = np.random.randint(low=0, high=2, size=yshape, dtype=indices_dtype)
ref_res = ref_funcs.gather_nd(x_data, y_data, batch_dims)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])
verify_gather_nd((2, 2, 2), (1, 2), [[1, 0]], 1)
verify_gather_nd((2, 2, 2), (1, 2, 1), [[[1], [0]]], 1)
verify_gather_nd((2, 2, 2), (2, 2, 1), [[[1], [0]], [[0], [1]]], 1)
verify_gather_nd((2, 2, 2), (1, 2), None, 1)
verify_gather_nd((2, 2, 2), (2, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (3, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (2, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (1, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (3, 2, 1), None, 1)
verify_gather_nd((2, 2, 3, 2), (2, 2, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (1, 2, 3), None, 1)
verify_gather_nd((3, 2, 2, 3, 4), (3, 3, 2), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (2, 3, 2), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (3, 3, 2, 1), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (2, 3, 2, 2), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2, 3), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2, 3), None, 2, indices_dtype="uint8")
verify_gather_nd((2, 2, 2), (2, 2, 1), [[[1], [0]], [[0], [1]]], 1, indices_dtype="uint32")
def _verify_infiniteness_ops(relay_op, ref_op, target="llvm", dev=None):
for dtype in ["float32", "float16", "float16", "int32", "int16"]:
shape = (2, 8, 8)
x = relay.var("x", relay.TensorType(shape, dtype))
y = relay_op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "bool")
data = np.random.uniform(size=shape).astype(dtype)
if dtype.startswith("float"):
data.ravel()[
np.random.choice(data.size, int(data.size * 0.5), replace=False)
] = np.inf
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
op_res = create_executor(target=target, device=dev).evaluate(y, {x: data})
ref_res = ref_op(data)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
@tvm.testing.requires_gpu
def test_isfinite():
for target, dev in tvm.testing.enabled_targets():
if target not in ["llvm", "cuda"]:
continue
_verify_infiniteness_ops(relay.isfinite, np.isfinite, target=target, dev=dev)
@tvm.testing.requires_gpu
def test_isinf():
for target, dev in tvm.testing.enabled_targets():
if target not in ["llvm", "cuda"]:
continue
_verify_infiniteness_ops(relay.isinf, np.isinf, target=target, dev=dev)
def test_unravel_index(target, dev, executor_kind):
def verify_unravel_index(indices, shape, dtype):
x_data = np.array(indices).astype(dtype)
y_data = np.array(shape).astype(dtype)
x = relay.var("x", relay.TensorType(x_data.shape, dtype))
y = relay.var("y", relay.TensorType(y_data.shape, dtype))
z = relay.unravel_index(x, y)
zz = run_infer_type(z)
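# unravel_index emits one coordinate per dimension of `shape`: a vector of
# n flat indices yields shape (ndim, n), a scalar index yields (ndim,).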
if len(x_data.shape) == 1:
out_shape = [y_data.shape[0], x_data.shape[0]]
else:
out_shape = [y_data.shape[0]]
assert zz.checked_type == relay.ty.TensorType(out_shape, dtype)
func = relay.Function([x, y], z)
ref_res = np.unravel_index(x_data, y_data)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
for dtype in ["int64", "int32"]:
verify_unravel_index([0, 1, 2, 3], [2, 2], dtype)
verify_unravel_index([144], [5, 5, 5, 2], dtype)
verify_unravel_index(144, [5, 5, 5, 2], dtype)
verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype)
def test_sparse_to_dense(target, dev, executor_kind):
def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, expected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
default_value_data = np.array(default_value)
a = relay.var(
"a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype))
)
b = relay.var(
"b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype))
)
if default_value is None:
args = [a, b]
d = relay.sparse_to_dense(a, output_shape, b)
else:
c = relay.var(
"c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype))
)
args = [a, b, c]
d = relay.sparse_to_dense(a, output_shape, b, c)
zz = run_infer_type(d)
assert zz.checked_type == relay.ty.TensorType(output_shape, str(sparse_values_data.dtype))
func = relay.Function(args, d)
f = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)
if default_value is None:
op_res = f(sparse_indices_data, sparse_values_data)
else:
op_res = f(sparse_indices_data, sparse_values_data, default_value_data)
tvm.testing.assert_allclose(op_res.numpy(), expected, rtol=1e-5)
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0])
verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3])
verify_sparse_to_dense(
[[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
)
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
)
verify_sparse_to_dense(
[0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
)
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])
class TestSparseReshape:
sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np = tvm.testing.parameters(
(
np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([2, 3, 6], dtype=np.int32),
np.array([9, -1], dtype=np.int32),
),
(
np.array(
[[0, 0, 0, 0], [0, 0, 1, 2], [0, 1, 0, 3], [1, 0, 0, 4], [1, 2, 3, 6]],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 1, 2, 3],
[0, 1, 0, 3, 5],
[1, 0, 0, 4, 6],
[1, 2, 3, 6, 8],
],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7, 9], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([9, 4], dtype=np.int32),
np.array([2, -1, 6], dtype=np.int32),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([9, 4], dtype=np.int64),
np.array([-1], dtype=np.int64),
),
(
np.array([[0], [5], [10], [20], [24]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([25], dtype=np.int32),
np.array([5, 5], dtype=np.int32),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([500, 20], dtype=np.int32),
np.array([500, -1], dtype=np.int32),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([250, 40], dtype=np.int64),
),
(
np.ones((0, 1), dtype=np.int32),
np.array([], dtype=np.int32),
np.array([4], dtype=np.int32),
np.array([2, -1], dtype=np.int32),
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
            np.array([4], dtype=np.int64),
np.array([2, 2], dtype=np.int64),
),
(
np.ones((0, 2), dtype=np.int32),
np.array([], dtype=np.int32),
np.array([3, 6], dtype=np.int32),
np.array([-1, 2], dtype=np.int32),
),
)
use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"])
@tvm.testing.fixture(cache_return_value=True)
def ref_res(
self,
sparse_indices_np: np.ndarray,
prev_shape_np: np.ndarray,
new_shape_np: np.ndarray,
):
"""
        This function calculates the expected output of the sparse_reshape operator given the inputs.
"""
new_sparse_indices = np.ones(
(sparse_indices_np.shape[0], new_shape_np.shape[0]), dtype=sparse_indices_np.dtype
)
multipliers = np.ones(prev_shape_np.shape[0])
dividers = np.ones(new_shape_np.shape[0])
total_ele = np.prod(prev_shape_np)
division_total_ele = 1
for i in range(new_shape_np.shape[0]):
if new_shape_np[i] == -1:
continue
division_total_ele *= new_shape_np[i]
for i in range(prev_shape_np.shape[0] - 2, -1, -1):
multipliers[i] = prev_shape_np[i + 1] * multipliers[i + 1]
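        # Infer any -1 dimension so that the reshape preserves the element count.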
        for i in range(len(new_shape_np)):
            if new_shape_np[i] == -1:
                new_shape_np[i] = total_ele // division_total_ele
if np.array_equal(prev_shape_np, new_shape_np):
return sparse_indices_np, prev_shape_np
for i in range(new_shape_np.shape[0] - 2, -1, -1):
dividers[i] = new_shape_np[i + 1] * dividers[i + 1]
for row_num, sparse_row in enumerate(sparse_indices_np):
flat_idx = 0
if len(sparse_indices_np.shape) != 1:
for i, ele in enumerate(sparse_row):
flat_idx += sparse_row[i] * multipliers[i]
else:
flat_idx += sparse_row
if len(new_sparse_indices.shape) != 1:
                for i in range(new_sparse_indices.shape[1]):
                    new_sparse_indices[row_num][i] = flat_idx // dividers[i]
flat_idx = flat_idx % dividers[i]
else:
new_sparse_indices[row_num] = flat_idx
return new_sparse_indices, new_shape_np
@tvm.testing.known_failing_targets("vulkan")
def test_sparse_reshape(
self,
target,
dev,
ref_res,
sparse_indices_np,
sparse_values_np,
prev_shape_np,
new_shape_np,
use_dyn,
):
if use_dyn:
sparse_indices = relay.var(
"sparse_indices",
shape=[relay.Any(), relay.Any()],
dtype=str(sparse_indices_np.dtype),
)
prev_shape = relay.var(
"prev_shape",
shape=[relay.Any()],
dtype=str(prev_shape_np.dtype),
)
new_shape = relay.var(
"new_shape",
shape=[relay.Any()],
dtype=str(new_shape_np.dtype),
)
else:
sparse_indices = relay.var(
"sparse_indices",
relay.TensorType(sparse_indices_np.shape, str(sparse_indices_np.dtype)),
)
prev_shape = relay.var(
"prev_shape", relay.TensorType(prev_shape_np.shape, str(prev_shape_np.dtype))
)
new_shape = relay.var(
"new_shape", relay.TensorType(new_shape_np.shape, str(new_shape_np.dtype))
)
z = relay.op.sparse_reshape(sparse_indices, prev_shape, new_shape).astuple()
func = relay.Function([sparse_indices, prev_shape, new_shape], z)
outputs = run_infer_type(z)
new_sparse_indices_infer_type, new_shape_infer_type = (
outputs.checked_type.fields[0].dtype,
outputs.checked_type.fields[1].dtype,
)
assert new_sparse_indices_infer_type == sparse_indices_np.dtype
assert new_shape_infer_type == new_shape_np.dtype |
verify_func(
target,
dev,
func,
[sparse_indices_np, prev_shape_np, new_shape_np],
ref_res,
)
class TestSegmentSum:
data_np, segment_ids_np, num_segments = tvm.testing.parameters(
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 0, 1, 1, 0, 1], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 0, 1], dtype=np.int32),
None,
),
(
np.random.random((6, 4, 5)),
np.array([2, 0, 1, 0, 3, 2], dtype=np.int64),
None,
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float32),
np.array([0, 0, 1], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([5, 0, 1, 0, 3, 6, 8, 7, 7], dtype=np.int64),
9,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 2], dtype=np.int32),
4,
),
(
np.random.random((6, 4, 5)),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
100,
),
)
use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"])
@tvm.testing.fixture(cache_return_value=True)
def ref_res(
self,
data_np: np.ndarray,
segment_ids_np: np.ndarray,
num_segments: Optional[int],
):
"""
This function calculates the expected output of segment_sum operator given the inputs.
"""
if not num_segments:
num_segments = np.unique(segment_ids_np).shape[0]
result = np.zeros((num_segments,) + data_np.shape[1:], data_np.dtype)
for i, index in enumerate(segment_ids_np):
result[index] += data_np[i]
return result
    @tvm.testing.known_failing_targets("vulkan")
def test_segment_sum(
self,
target,
dev,
ref_res: np.ndarray,
data_np: np.ndarray,
segment_ids_np: np.ndarray,
num_segments: Optional[int],
use_dyn: bool,
):
"""
This function verifies the relay output of segment_sum with its expected output.
"""
if use_dyn:
data = relay.var(
"data",
shape=[relay.Any() for _ in data_np.shape],
dtype=str(data_np.dtype),
)
segment_ids = relay.var(
"segment_ids",
shape=[relay.Any()],
dtype=str(segment_ids_np.dtype),
)
else:
data = relay.var(
"data",
relay.TensorType(data_np.shape, str(data_np.dtype)),
)
segment_ids = relay.var(
"segment_ids", relay.TensorType(segment_ids_np.shape, str(segment_ids_np.dtype))
)
z = relay.op.segment_sum(data, segment_ids, num_segments)
func = relay.Function([data, segment_ids], z)
segment_sum_result = run_infer_type(z)
assert segment_sum_result.checked_type.dtype == data_np.dtype
verify_func(
target,
dev,
func,
[data_np, segment_ids_np],
ref_res,
)
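# Shared helper: wrap func in an IRModule, evaluate it with each requested
# executor kind (the VM by default, which also supports dynamic shapes), and
# compare every output against its NumPy reference.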
def verify_func(target, dev, func, data, ref_res, rtol=1e-5, atol=1e-7, kinds=["vm"]):
assert isinstance(data, list)
for kind in kinds:
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(*data)
if isinstance(op_res, tvm.runtime.container.ADT):
assert len(op_res) == len(
ref_res
), "Outputs from TVM and Python implementation must be equal "
for op_result, ref_result in zip(op_res, ref_res):
                tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=rtol, atol=atol)
else:
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)
relay.backend.te_compiler.get().clear()
def test_adv_index(target, dev, executor_kind):
def verify_adv_index(data_shape, index_shapes):
dtype = "float32"
inputs = [relay.var("data", relay.TensorType(data_shape, dtype))]
np_data = np.random.uniform(size=data_shape).astype(dtype)
np_indices = []
for i, index_shape in enumerate(index_shapes):
limit = data_shape[i]
np_indices.append(np.random.uniform(0, limit - 1, size=index_shape).astype("int64"))
inputs.append(relay.var("index_{}".format(i), relay.TensorType(index_shape, "int64")))
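        # Plain NumPy advanced indexing with the same index tuple provides the
        # reference result.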
np_out = np_data[tuple(np_indices)]
np_args = [np_data] + np_indices
out = relay.op.adv_index(inputs)
func = relay.Function(inputs, out)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
*np_args
)
tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5)
verify_adv_index((10, 5), [(3, 4), (3, 1)])
verify_adv_index((10, 5), [(1, 4), (3, 1)])
verify_adv_index(
(10, 5),
[
(2,),
],
)
verify_adv_index((10, 5, 15), [(1, 2, 1), (1, 2, 7)])
scanops_supported = {"cumsum": relay.op.cumsum, "cumprod": relay.op.cumprod}
def run_binop_tests(
target,
dev,
executor_kind,
binop_type: str,
    gt_func: Callable[..., np.ndarray],
identity_value: int,
):
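    """Check an inclusive/exclusive scan op (cumsum or cumprod) against a NumPy reference."""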
def assert_relay_scanop(
        data_np: np.ndarray,
        np_out: np.ndarray,
        axis: Optional[int] = None,
        out_dtype: Optional[str] = None,
rtol: float = 1e-5,
atol: float = 1e-5,
exclusive: bool = False,
):
inp = relay.var("data", relay.TensorType(data_np.shape, str(data_np.dtype)))
if binop_type not in scanops_supported.keys():
raise ValueError(f"Unknown function {binop_type}. Options: {scanops_supported.keys()}") |
out = scanops_supported[binop_type](inp, axis, out_dtype, exclusive=exclusive)
func = relay.Function([inp], out)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data_np
)
tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=rtol, atol=atol)
data = np.array([2, 3, 0])
assert_relay_scanop(data, gt_func(data))
assert_relay_scanop(data, gt_func(data), out_dtype="int64")
data = np.random.randn(10, 10)
assert_relay_scanop(data, gt_func(data))
assert_relay_scanop(data, gt_func(data, axis=0), axis=0)
assert_relay_scanop(data, gt_func(data, axis=1), axis=1)
data = np.random.randn(10, 5, 10).astype("float32")
assert_relay_scanop(data, gt_func(data), rtol=1e-4, atol=1e-4)
assert_relay_scanop(data, gt_func(data, axis=0), axis=0, rtol=1e-4, atol=1e-4)
assert_relay_scanop(data, gt_func(data, axis=1), axis=1, rtol=1e-4, atol=1e-4)
assert_relay_scanop(data, gt_func(data, axis=-1), axis=-1, rtol=1e-4, atol=1e-4)
data = np.random.rand(10) > 0.5
data = data.astype(np.int32)
assert_relay_scanop(data, gt_func(data, dtype=np.int32))
assert_relay_scanop(data, gt_func(data, dtype="int64"), out_dtype="int64")
data = np.random.randint(-100, 100, size=(10, 10)).astype("int64")
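    # An exclusive scan equals the inclusive scan shifted one step along the scan
    # axis, with the identity element (0 for cumsum, 1 for cumprod) in front.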
expected_result = np.roll(gt_func(data), 1)
expected_result[0] = identity_value
assert_relay_scanop(data, expected_result, exclusive=True)
expected_result = np.roll(gt_func(data, axis=0), 1, axis=0)
expected_result[0, :] = identity_value
assert_relay_scanop(data, expected_result, exclusive=True, axis=0)
expected_result = np.roll(gt_func(data, axis=1), 1, axis=1)
expected_result[:, 0] = identity_value
assert_relay_scanop(data, expected_result, exclusive=True, axis=1)
@tvm.testing.parametrize_targets
def test_cumsum(target, dev, executor_kind):
run_binop_tests(
target, dev, executor_kind, binop_type="cumsum", gt_func=np.cumsum, identity_value=0 |
)
@tvm.testing.parametrize_targets
def test_cumprod(target, dev, executor_kind):
run_binop_tests(
target, dev, executor_kind, binop_type="cumprod", gt_func=np.cumprod, identity_value=1
)
@tvm.testing.parametrize_targets
def test_scatter_nd(target, dev, executor_kind):
def test_scatter_nd_large_shape():
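        # Constant-fold a scatter_nd whose operands are large constants; the test
        # passes as long as InferType and FoldConstant run to completion.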
def before():
data = relay.const(np.zeros((1, 900, 300), dtype="float32"), dtype="float32")
indices = relay.const(np.ones((3, 1, 900, 300), dtype="int64"), dtype="int64")
update = relay.const(np.ones((1, 900, 300), dtype="float32"), dtype="float32")
b = relay.op.scatter_nd(data, indices, update)
return relay.Function(relay.analysis.free_vars(b), b)
passes = tvm.transform.Sequential(
[
relay.transform.InferType(),
relay.transform.FoldConstant(),
]
)
before_mod = tvm.IRModule.from_expr(before())
with tvm.transform.PassContext(opt_level=3):
after_mod = passes(before_mod)
test_scatter_nd_large_shape()
def verify_scatter_nd(
data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5
):
data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
indices = relay.var("indices", shape=indices_np.shape, dtype=str(indices_np.dtype))
updates = relay.var("updates", shape=updates_np.shape, dtype=str(updates_np.dtype))
out = relay.op.scatter_nd(data, indices, updates, mode)
func = relay.Function([data, indices, updates], out)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data_np, indices_np, updates_np
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)
def verify_scatter_nd_with_stack(
data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5
):
        data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))