def verify_scatter_nd_with_stack(
data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5
):
data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
indices_vars = [
relay.var("ind%d" % i, shape=v.shape, dtype=str(v.dtype))
for i, v in enumerate(indices_np)
]
updates = relay.var("updates", shape=updates_np.shape, dtype=str(updates_np.dtype))
indices = relay.op.stack(indices_vars, axis=0)
out = relay.op.scatter_nd(data, indices, updates, mode)
func = relay.Function(
[data, updates] + indices_vars,
out,
)
fargs = [data_np, updates_np]
for a in indices_np:
fargs.append(a)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
*fargs
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)
for indice_dtype in ["uint8", "uint16", "uint32"]:
data = np.zeros((2, 2)).astype("int64")
indices = np.array([[1, 1, 0], [0, 1, 0]]).astype(indice_dtype)
updates = np.array([2, 3, 0])
out = np.array([[0, 0], [2, 3]])
verify_scatter_nd(data, indices, updates, out)
verify_scatter_nd_with_stack(data, indices, updates, out)
data = np.zeros((2, 2, 2, 2)).astype("int64")
indices = np.array([[0, 1], [1, 1]]).astype(indice_dtype)
updates = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
out = np.array([[[[0, 0], [0, 0]], [[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]])
verify_scatter_nd(data, indices, updates, out)
verify_scatter_nd_with_stack(data, indices, updates, out)
indices = np.array([[1, 0, 0]]).astype(indice_dtype)
updates = np.reshape(np.arange(1560 * 3), (3, 1560)).astype("float32")
shape = (2, 1560)
data = np.zeros(shape).astype("float32")
out = data.copy()
out[1, :] += updates[0, :]
out[0, :] += updates[1, :]
out[0, :] += updates[2, :]
verify_scatter_nd(data, indices, updates, out, mode="add")
verify_scatter_nd_with_stack(data, indices, updates, out, mode="add")
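# scatter_nd reference semantics as exercised below: column i of `indices`
# addresses the output cell (indices[0, i], indices[1, i]) that receives row i
# of `updates`; mode="add" accumulates into `data`, mode="update" overwrites it.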
for mode in ["add", "update"]:
indices = np.stack((np.random.randint(2, size=5), np.random.randint(7, size=5))).astype(
indice_dtype
)
updates = np.ones((5, 3)).astype("float64")
shape = (2, 7, 3)
data = np.random.random(shape).astype("float64")
out = data.copy()
for i in range(indices.shape[1]):
for j in range(updates.shape[1]):
if mode == "add":
out[indices[0, i], indices[1, i], j] += updates[i, j]
elif mode == "update":
out[indices[0, i], indices[1, i], j] = updates[i, j]
verify_scatter_nd(data, indices, updates, out, mode)
verify_scatter_nd_with_stack(data, indices, updates, out, mode)
def test_unique(target, dev):
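# np.unique always returns its values sorted; to emulate is_sorted=False, the
# helper below reorders the unique values by first occurrence (argsort of the
# first-occurrence indices) and remaps the inverse indices to match.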
def calc_numpy_unique(data, is_sorted=False):
uniq, index, inverse, counts = np.unique(
data, return_index=True, return_inverse=True, return_counts=True
)
num_uniq = np.array([len(uniq)]).astype("int32")
if not is_sorted:
order = np.argsort(index)
reverse_order = np.argsort(order)
uniq = uniq[order].astype(data.dtype)
inverse = np.array([reverse_order[i] for i in inverse]).astype("int32")
counts = counts[order].astype("int32")
index = np.sort(index)
return [
uniq.astype(data.dtype),
index.astype("int32"),
inverse.astype("int32"),
num_uniq,
counts,
]
def verify_unique(n, dtype, is_dyn=False, is_sorted=False, return_counts=False):
if is_dyn:
x = relay.var("x", relay.TensorType([relay.Any()], dtype))
else:
x = relay.var("x", relay.TensorType([n], dtype))
outs = relay.unique(x, is_sorted, return_counts)
outs = outs.astuple()
func = relay.Function([x], outs)
x_data = np.random.randint(50, size=n).astype(dtype)
if is_dyn:
backend = "vm"
else:
backend = "graph"
mod = tvm.ir.IRModule.from_expr(func)
tvm_res = relay.create_executor(backend, mod=mod, device=dev, target=target).evaluate()(
x_data
)
np_res = calc_numpy_unique(
x_data, is_sorted
)
num_unique = np_res[3][0]
assert num_unique == tvm_res[3].numpy()[0]
tvm.testing.assert_allclose(tvm_res[0].numpy()[:num_unique], np_res[0], rtol=1e-5)
tvm.testing.assert_allclose(tvm_res[1].numpy()[:num_unique], np_res[1], rtol=1e-5)
tvm.testing.assert_allclose(tvm_res[2].numpy(), np_res[2], rtol=1e-5)
if return_counts:
tvm.testing.assert_allclose(tvm_res[4].numpy()[:num_unique], np_res[4], rtol=1e-5)
for dtype in ["int32", "int64"]:
for i in range(8):
is_dyn, is_sorted, return_counts = bool(i & 1), bool(i & 2), bool(i & 4)
verify_unique(10, dtype, is_dyn, is_sorted, return_counts)
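# The STFT reference below computes, for batch b, frequency bin w and frame m,
#   X[b, w, m] = sum_k window[k] * x[b, m * hop_length + k] * exp(-2j*pi*w*k / n_fft)
# with the real part stored in [..., 0] and the imaginary part in [..., 1].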
class TestSTFT:
(
data_np,
n_fft,
hop_length,
win_length,
window_np,
normalized,
onesided,
) = tvm.testing.parameters(
(
np.array([[1, 2, 3, 4, 5, 6]], dtype=np.float32),
3,
3,
3,
np.array([4, 3, 2], dtype=np.int32),
False,
True,
),
(
np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9], [2, 5, 7, 8, 5, 6, 7, 3, 2]], dtype=np.float32),
2,
1,
2,
np.array([1, 3], dtype=np.int32),
False,
True,
),
(
np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9], [2, 5, 7, 8, 5, 6, 7, 3, 2]], dtype=np.float32),
2,
1,
2,
np.array([1, 3], dtype=np.int32),
True,
True,
),
(
np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9], [2, 5, 7, 8, 5, 6, 7, 3, 2]], dtype=np.float32),
2,
1,
2,
np.array([1, 3], dtype=np.int32),
False,
False,
),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_res(
self,
data_np: np.ndarray,
n_fft: int,
hop_length: int,
win_length: int,
window_np,
normalized,
onesided,
):
"""
This function calculates the expected output of segment_sum operator given the inputs.
"""
def pad_window(window_np, n_fft):
shape = window_np.shape[-1]
lpad = int((n_fft - shape) // 2)
lengths = [(0, 0)] * len(window_np.shape)
lengths[-1] = (lpad, int(n_fft - shape - lpad))
if lpad < 0:
print("ERROR Padding")
return np.pad(window_np, lengths, mode="constant")
import math
if not onesided:
n_rows = n_fft
else:
n_rows = n_fft // 2 + 1
if window_np is None:
window_np = np.ones(win_length, dtype=np.int32)
window_np = pad_window(window_np, n_fft)
n_cols = (data_np.shape[-1] - n_fft) // hop_length + 1
np_result = np.zeros((data_np.shape[0], n_rows, n_cols, 2))
for batch in range(data_np.shape[0]):
for w in range(n_rows):
for m in range(n_cols):
for k in range(n_fft):
np_result[batch][w][m][0] += (
window_np[k]
* data_np[batch][m * hop_length + k]
* math.cos(2 * math.pi * w * k / n_fft)
)
np_result[batch][w][m][1] -= (
window_np[k]
* data_np[batch][m * hop_length + k]
* math.sin(2 * math.pi * w * k / n_fft)
)
if normalized:
np_result[batch][w][m][0] /= math.sqrt(n_fft)
np_result[batch][w][m][1] /= math.sqrt(n_fft)
return np_result
use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"])
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_stft(
self,
target,
dev,
ref_res: np.ndarray,
data_np: np.ndarray,
n_fft: int,
hop_length: int,
win_length: int,
window_np: np.ndarray,
normalized: bool,
onesided: bool,
use_dyn,
):
if use_dyn:
data = relay.var(
"data",
relay.TensorType([relay.Any(), relay.Any()], str(data_np.dtype)),
)
window = relay.var(
"window",
relay.TensorType([relay.Any()], str(window_np.dtype)),
)
backends = ["vm"]
else:
data = relay.var(
"data",
relay.TensorType(data_np.shape, str(data_np.dtype)),
)
window = relay.var(
"window",
relay.TensorType(window_np.shape, str(window_np.dtype)),
)
backends = ["graph", "vm"]
z = relay.op.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
func = relay.Function([data, window], z)
verify_func(
target, dev, func, [data_np, window_np], ref_res, rtol=1e-3, atol=1e-3, kinds=backends
)
def test_trilu(target="llvm", dev=tvm.cpu()):
def verify_trilu(data_shape, upper=True, k=0):
data = relay.var("data", relay.TensorType(data_shape, "float32"))
y = relay.trilu(data, k, upper)
mod = tvm.ir.IRModule.from_expr(y)
data_np = np.random.normal(size=data_shape).astype("float32")
tvm_res = (
relay.create_executor("graph", mod=mod, device=dev, target=target)
.evaluate()(data_np)
.numpy()
)
if upper:
np_res = np.triu(data_np, k)
else:
np_res = np.tril(data_np, k)
tvm.testing.assert_allclose(tvm_res, np_res)
verify_trilu((3, 3), True, 0)
verify_trilu((3, 3), False, 0)
verify_trilu((6, 6), True, 1)
verify_trilu((6, 6), False, 2)
verify_trilu((6, 6), False, -2)
verify_trilu((8, 6, 6), False, -2)
def test_trilu_shape_i64():
data_x = np.ones((2, 1), dtype="float32")
x = relay.var("x", shape=[2, 1], dtype="float32")
v0 = relay.broadcast_to(x, shape=relay.const([2, 1], dtype="int64"))
v2 = relay.add(relay.const([[1.0]]), v0)
v3 = relay.trilu(v0, k=0)
f = relay.Function([x], relay.Tuple([v2, v3]))
tvm_res = relay.create_executor("graph", device=tvm.cpu(), target="llvm").evaluate(f)(data_x)
np_res = (
np.array([[2.0], [2.0]], dtype=np.float32),
np.array([[1.0], [0.0]], dtype=np.float32),
)
tvm.testing.assert_allclose(tvm_res[0].numpy(), np_res[0])
tvm.testing.assert_allclose(tvm_res[1].numpy(), np_res[1])
def test_trilu_reduce():
data_i0 = np.ones((2, 2), dtype="int32")
k = 0
i0 = relay.var("i0", shape=[2, 2], dtype="int32")
i1 = relay.var("i1", shape=(), dtype="int64")
v0 = relay.trilu(i0, i1)
v1 = relay.argmin(v0, axis=[0])
f = relay.Function([i0, i1], v1)
tvm_res = (
relay.create_executor("graph", device=tvm.cpu(), target="llvm")
.evaluate(f)(data_i0, k)
.numpy()
)
np_res = np.triu(data_i0, k).argmin(axis=0)
tvm.testing.assert_allclose(tvm_res, np_res)
if __name__ == "__main__":
tvm.testing.main()
import sys
import numpy as np
import numpy.random
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
executor_kind = tvm.testing.parameter("graph", "vm")
@tvm.testing.uses_gpu
def test_binary_op():
def check_binary_op(opfunc, ref):
n = te.size_var("n")
t1 = relay.TensorType((5, n, 5))
t2 = relay.TensorType((n, 1))
x = relay.var("x", t1)
y = relay.var("y", t2)
z = opfunc(x, y)
assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == t1
if ref is not None:
t1 = relay.TensorType((5, 10, 5))
t2 = relay.TensorType((5, 10, 5))
x = relay.var("x", t1)
y = relay.var("y", t2)
z = opfunc(x, y)
x_data = np.random.rand(5, 10, 5).astype(t1.dtype)
y_data = np.random.rand(5, 10, 5).astype(t2.dtype)
ref_res = ref(x_data, y_data)
func = relay.Function([x, y], z)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
for opfunc, ref in [(relay.power, np.power)]:
check_binary_op(opfunc, ref)
@tvm.testing.uses_gpu
def test_cmp_type():
for op, ref in (
(relay.greater, np.greater),
(relay.greater_equal, np.greater_equal),
(relay.less, np.less),
(relay.less_equal, np.less_equal),
(relay.equal, np.equal),
(relay.not_equal, np.not_equal),
):
x = relay.var("x", relay.TensorType((10, 4), "float32"))
y = relay.var("y", relay.TensorType((5, 10, 1), "float32"))
z = op(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((5, 10, 4), "bool")
if ref is not None:
x_shape = (10, 4)
y_shape = (5, 10, 1)
t1 = relay.TensorType(x_shape)
t2 = relay.TensorType(y_shape)
x = relay.var("x", t1)
y = relay.var("y", t2)
z = op(x, y)
x_data = np.random.rand(*x_shape).astype(t1.dtype)
y_data = np.random.rand(*y_shape).astype(t2.dtype)
ref_res = ref(x_data, y_data)
func = relay.Function([x, y], z)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
@tvm.testing.uses_gpu
def test_binary_int_broadcast_1():
for op, ref in [(relay.right_shift, np.right_shift), (relay.left_shift, np.left_shift)]:
x = relay.var("x", relay.TensorType((10, 4), "int32"))
y = relay.var("y", relay.TensorType((5, 10, 1), "int32"))
z = op(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((5, 10, 4), "int32")
if ref is not None:
x_shape = (10, 4)
y_shape = (5, 10, 1)
t1 = relay.TensorType(x_shape, "int32")
t2 = relay.TensorType(y_shape, "int32")
x_data = np.random.randint(1, 10000, size=(x_shape)).astype(t1.dtype)
y_data = np.random.randint(1, 31, size=(y_shape)).astype(t2.dtype)
func = relay.Function([x, y], z)
ref_res = ref(x_data, y_data)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
@tvm.testing.uses_gpu
def test_binary_int_broadcast_2():
for op, ref in [(relay.maximum, np.maximum), (relay.minimum, np.minimum), (relay.mod, np.mod)]:
x = relay.var("x", relay.TensorType((10, 4), "int32"))
y = relay.var("y", relay.TensorType((5, 10, 1), "int32"))
z = op(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((5, 10, 4), "int32")
if ref is not None:
x_shape = (10, 4)
y_shape = (5, 10, 1)
t1 = relay.TensorType(x_shape, "int32")
t2 = relay.TensorType(y_shape, "int32")
x_data = np.random.randint(1, 10000, size=(x_shape)).astype(t1.dtype)
y_data = np.random.randint(1, 10000, size=(y_shape)).astype(t2.dtype)
func = relay.Function([x, y], z)
ref_res = ref(x_data, y_data)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
@tvm.testing.uses_gpu
def test_where(executor_kind):
def run(func, inputs, ref_res):
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
*inputs
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def verify(x_np, y_np, cond_np):
ref_res = np.where(cond_np, x_np, y_np)
args = []
args_np = []
vs = []
cond = relay.var("cond", relay.TensorType(cond_np.shape, "bool"))
args.append(cond)
args_np.append(cond_np)
for v_name, v_np in [("x", x_np), ("y", y_np)]:
if len(v_np.shape) == 0:
v = relay.const(v_np.item())
else:
v = relay.var(v_name, relay.TensorType(v_np.shape, dtype))
args.append(v)
args_np.append(v_np)
vs.append(v)
z = relay.where(cond, vs[0], vs[1])
func = relay.Function(args, z)
run(func, args_np, ref_res)
dtype = "float32"
x_np = np.random.uniform(size=(3, 4)).astype(dtype)
y_np = np.random.uniform(size=(3, 4)).astype(dtype)
cond_np = np.random.uniform(low=-1, high=1, size=(3, 4)) > 0
verify(x_np, y_np, cond_np)
x_np = np.array(1.0, dtype)
y_np = np.array(-1.0, dtype)
cond_np = np.array([1, 0, 1], dtype=bool)
verify(x_np, y_np, cond_np)
x_np = np.arange(10).astype(dtype)
y_np = 10 * x_np
cond_np = x_np < 5
verify(x_np, y_np, cond_np)
x_np = np.array([[1, 2], [3, 4]], dtype)
y_np = np.array([[5, 6], [7, 8]], dtype)
cond_np = np.array([[1], [0]], dtype=bool)
verify(x_np, y_np, cond_np)
verify(x_np, y_np, cond_np.T)
x_np = np.random.randn(1, 12, 8, 8).astype(dtype)
y_np = np.array(-1.0, dtype)
cond_np = np.random.randn(1, 1, 8, 8) > 0
verify(x_np, y_np, cond_np)
x_np, y_np = np.ogrid[:3, :4]
cond_np = np.where(x_np < y_np, x_np, 10 + y_np).astype(bool)
verify(x_np.astype(dtype), y_np.astype(dtype), cond_np)
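# numpy's argmin/argmax historically lack a keepdims argument, so this wrapper
# reinserts the reduced axis to mirror relay's keepdims=True output shape.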
def _with_keepdims(func):
def _wrapper(data, axis=None, keepdims=False):
if not keepdims:
return func(data, axis=axis)
else:
if axis is not None:
axis = axis if isinstance(axis, int) else axis[0]
out_shape = list(data.shape)
out_shape[axis] = 1
else:
out_shape = [1 for _ in range(len(data.shape))]
return func(data, axis=axis).reshape(out_shape)
return _wrapper
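# Numerically stable log-sum-exp reference: factor out the per-axis maximum so
# exp() cannot overflow, using log(sum(exp(x))) = max + log(sum(exp(x - max))).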
def _np_log_sum_exp(x, axis, keepdims=False):
max_x = np.max(x, axis=axis, keepdims=True)
x = np.log(np.sum(np.exp(x - max_x), axis=axis, keepdims=True))
x = x + max_x
if not keepdims:
x = np.squeeze(x, axis=axis)
return x
def _unbiased_relay_wrapper(f):
def _unbiased_func(x, axis=None, keepdims=False, exclude=False):
return f(x, axis=axis, keepdims=keepdims, exclude=exclude, unbiased=True)
return _unbiased_func
def _unbiased_np_wrapper(f):
def _unbiased_func(a, axis=None, dtype=None, keepdims=None):
return f(a, axis=axis, dtype=dtype, ddof=1, keepdims=keepdims)
return _unbiased_func
class TestReduceFunctions:
funcs = {
"sum": (relay.sum, np.sum),
"max": (relay.max, np.max),
"min": (relay.min, np.min),
"mean": (relay.mean, np.mean),
"var": (relay.variance, np.var),
"unbiased_var": (_unbiased_relay_wrapper(relay.variance), _unbiased_np_wrapper(np.var)),
"std": (relay.std, np.std),
"unbiased_std": (_unbiased_relay_wrapper(relay.std), _unbiased_np_wrapper(np.std)),
"prod": (relay.prod, np.prod),
"all": (relay.all, np.all),
"any": (relay.any, np.any),
"logsumexp": (relay.logsumexp, _np_log_sum_exp),
"argmin": (relay.argmin, _with_keepdims(np.argmin)),
"argmax": (relay.argmax, _with_keepdims(np.argmax)),
}
relay_func, ref_func = tvm.testing.parameters(
*funcs.values(),
ids=list(funcs),
)
d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
data, axis, keepdims, exclude, output = tvm.testing.parameters(
((d1, d2, d3, d4), None, False, False, ()),
((d1, d2, d3, d4), 2, True, False, (d1, d2, 1, d4)),
((d1, d2, d3, d4), 0, True, False, (1, d2, d3, d4)),
((d1, d2, d3), 1, True, False, (d1, 1, d3)),
((d1, d2, d3), 0, True, False, (1, d2, d3)),
((d1, d2, d3), None, True, False, (1, 1, 1)),
((d1, d2, d3), (0, 1), True, False, (1, 1, d3)),
((2, 3, 4), 1, True, False, (2, 1, 4)),
((2, 3, 4), (1,), True, False, (2, 1, 4)),
((2, 3, 4), -1, True, False, (2, 3, 1)),
((2, 3, 4), (0, 1, 2), False, False, ()),
((4, 4, 3), None, False, False, ()),
((4, 4, 3), (0, 2), False, False, (4,)),
((128, 24, 128), (0, 1), False, False, (128,)),
((128, 24, 128), (0, 2), False, False, (24,)),
((128, 24, 128), (0, 1), True, False, (1, 1, 128)),
((128, 24, 128), (0, 2), True, False, (1, 24, 1)),
)
def test_reduce(
self,
target,
dev,
relay_func,
ref_func,
executor_kind,
data,
axis,
keepdims,
exclude,
output,
):
dtype = "bool" if ref_func in [np.all, np.any] else "float32"
out_type = "int32" if relay_func in [relay.argmin, relay.argmax] else dtype
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and dtype == "bool":
pytest.xfail("Known failing test on vulkan runtime")
x = relay.var("x", relay.TensorType(data, dtype))
if relay_func == relay.logsumexp:
z = relay_func(x, axis, keepdims)
else:
z = relay_func(x, axis, keepdims, exclude)
zz = run_infer_type(z)
if axis:
assert "axis=" in z.astext()
if keepdims:
assert "keepdims=" in z.astext()
if exclude:
assert "exclude=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(output, out_type)
if all(isinstance(v, tvm.tir.Var) for v in data):
return
func = relay.Function([x], z)
x_data = (
np.random.choice([True, False], size=data)
if ref_func in [np.all]
else np.random.uniform(size=data).astype(dtype)
)
if ref_func in [np.sum]:
ref_res = ref_func(x_data + 0, axis=axis, dtype=dtype, keepdims=keepdims)
elif ref_func in [np.max, np.min, np.mean, np.prod]:
ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims)
else:
if axis and not isinstance(axis, int) and len(axis) > 1:
return
ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims)
op_res1 = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_argmin_argmax_get_last_elements():
def get_test_case(shape, gt_func, test_argmin=False):
total_ele = np.prod(shape)
arr = np.zeros(total_ele)
target_value = -1 if test_argmin else 1
arr[: total_ele // 3] = target_value
np.random.shuffle(arr)
arr = arr.reshape(shape)
ans = gt_func(np.flip(arr))
return arr, len(arr) - ans - 1
funcs_and_gt_funcs = [(relay.argmax, np.argmax), (relay.argmin, np.argmin)]
lengths = [5, 10, 15]
for func, gt_func in funcs_and_gt_funcs:
for shape in lengths:
x_in = relay.var("x_in", shape=[shape])
output = func(x_in, select_last_index=True)
arr, ans = get_test_case(shape, gt_func, test_argmin=func == relay.argmin)
mod = tvm.IRModule.from_expr(output)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(
"graph", mod=mod, device=dev, target=target
).evaluate()(arr)
assert op_res.numpy().item() == ans
def verify_mean_var_std(executor_kind, funcs, shape, axis, keepdims, dtype="float32"):
test_func = funcs[0]
ref_func = funcs[1]
x = relay.var("x", relay.TensorType(shape, dtype))
z = test_func(x, axis, keepdims)
func = relay.Function([x], z.astuple())
x_data = np.random.uniform(size=shape).astype("float32")
ref_mean = np.mean(x_data, axis=axis, dtype="float32", keepdims=keepdims).astype(dtype)
ref_res = ref_func(x_data, axis=axis, dtype="float32", keepdims=keepdims).astype(dtype)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data.astype(dtype)
)
if dtype == "float16":
rtol, atol = (1e-2, 1e-2)
else:
rtol, atol = (1e-5, 1e-5)
tvm.testing.assert_allclose(op_res[0].numpy(), ref_mean, rtol=rtol, atol=atol)
tvm.testing.assert_allclose(op_res[1].numpy(), ref_res, rtol=rtol, atol=atol)
@tvm.testing.uses_gpu
def test_mean_var_std(executor_kind):
for func in [[relay.mean_variance, np.var], [relay.mean_std, np.std]]:
verify_mean_var_std(executor_kind, func, (2, 3, 4), 1, True)
verify_mean_var_std(executor_kind, func, (2, 3, 4), (1,), True)
verify_mean_var_std(executor_kind, func, (2, 3, 4), -1, True)
verify_mean_var_std(executor_kind, func, (2, 3, 4), (0, 1, 2), False)
verify_mean_var_std(executor_kind, func, (4, 4, 3), None, False)
verify_mean_var_std(executor_kind, func, (4, 4, 3), (0, 2), False)
verify_mean_var_std(executor_kind, func, (128, 24, 128), (0, 1), False)
verify_mean_var_std(executor_kind, func, (128, 24, 128), (0, 2), False)
verify_mean_var_std(executor_kind, func, (128, 24, 128), (0, 1), True)
verify_mean_var_std(executor_kind, func, (128, 24, 128), (0, 2), True)
verify_mean_var_std(executor_kind, func, (128, 24, 128), (0, 2), True, "float16")
verify_mean_var_std(executor_kind, func, (128, 24, 128), None, False, "float16")
@tvm.testing.uses_gpu
def test_strided_slice():
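# slice_mode="end" treats `end` as exclusive stop indices (negative values
# count from the back); slice_mode="size" treats end[i] as the slice length,
# with -1 meaning everything from begin[i] to the end of that axis.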
def verify(
dshape,
begin,
end,
strides,
output,
axes=None,
slice_mode="end",
test_ref=True,
dtype="int32",
unknown_dim_value=10,
):
x = relay.var("x", relay.TensorType(dshape, "float32"))
ndim = len(dshape)
begin = begin if begin else [0] * ndim
end = end if end else list(dshape)
dshape = list(dshape)
for i, d in enumerate(dshape):
if not isinstance(d, int):
dshape[i] = unknown_dim_value
x_data = np.random.uniform(size=dshape).astype("float32")
ref_res = tvm.topi.testing.strided_slice_python(
x_data,
begin,
end,
strides,
slice_mode,
axes=axes,
)
if strides:
z = relay.strided_slice(
x, begin=begin, end=end, strides=strides, axes=axes, slice_mode=slice_mode
)
else:
z = relay.strided_slice(x, begin=begin, end=end, axes=axes, slice_mode=slice_mode)
func = relay.Function([x], z)
func = run_infer_type(func)
text = func.astext()
assert "begin=" in text
assert "end=" in text
if output:
assert func.body.checked_type == relay.ty.TensorType(output, "float32")
if not test_ref:
return
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("vm", device=dev, target=target).evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
verify((1, 3, 10, 10), [0, 0, 0, 0], [-1, 3, 10, 10], [1], (0, 3, 10, 10), dtype="int64")
verify(
(1, 224, 224, 3),
[0, 20, 20, 0],
[1, 140, 140, 3],
[1, 1, 1, 1],
(1, 120, 120, 3),
dtype="int64",
)
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3), dtype="int16")
verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 4], None, (2, 3, 3))
verify((3, 4, 3), [1, 1], [4, 4, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))
verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))
verify((3, 4, 3), [-1, -1, -1], [-5, -5, -5], [-1, -1, -1], (3, 4, 3))
verify((3, 4, 3), [0, 0, 0], [np.iinfo(np.int32).max] * 3, [1, 1, 1], (3, 4, 3))
verify(
(3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], (2, 4, 3), slice_mode="size", test_ref=False
)
verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], (2, 2, 3), slice_mode="size", test_ref=True)
verify((3, 4, 3), [1], [4], None, None, axes=[1])
verify((3, relay.Any()), [0], [1], [1], None, axes=[1], unknown_dim_value=10)
verify((relay.Any(), 3), [0], [1], [1], None, axes=[1], unknown_dim_value=10)
verify(
(relay.Any(), relay.Any(), relay.Any()),
[0, 1, 2],
[5, 5, 5],
[1, 2, 1],
None,
unknown_dim_value=10,
)
@tvm.testing.uses_gpu
def test_dyn_strided_slice():
def verify(
dshape,
begin,
end,
strides,
output,
axes=None,
ishape=None,
slice_mode="end",
test_ref=True,
dtype="int32",
):
ndim = len(dshape)
begin = begin if begin else [0] * ndim
end = end if end else list(dshape)
x_data = np.random.uniform(size=dshape).astype("float32")
ref_res = tvm.topi.testing.strided_slice_python(
x_data, begin, end, strides, slice_mode, axes=axes
)
if ishape is None:
ishape = (relay.Any(),) * ndim
x = relay.var("x", relay.TensorType(ishape, "float32"))
if strides:
z = relay.strided_slice(
x, begin=begin, end=end, strides=strides, axes=axes, slice_mode=slice_mode
)
else:
z = relay.strided_slice(x, begin=begin, end=end, axes=axes, slice_mode=slice_mode)
func = relay.Function([x], z)
func = run_infer_type(func)
text = func.astext()
assert "begin=" in text
assert "end=" in text
if not test_ref:
return
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
verify(
(1, 224, 224, 3),
[0, 20, 20, 0],
[1, 140, 140, 3],
[1, 1, 1, 1],
(1, 120, 120, 3),
dtype="int64",
)
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3), dtype="int16")
verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 4, 4], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))
verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))
verify(
(3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], (2, 4, 3), slice_mode="size", test_ref=False
)
verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], (2, 2, 3), slice_mode="size", test_ref=True)
verify(
(3, 4, 3, 2),
[1, 0],
[3, 1],
[1, 1],
None,
axes=[1, 3],
ishape=(relay.Any(), 4, relay.Any(), 2),
)
@tvm.testing.uses_gpu
def test_strided_set():
def verify(dshape, begin, end, strides, vshape, test_ref=True):
x = relay.var("x", relay.TensorType(dshape, "float32"))
v = relay.var("v", relay.TensorType(vshape, "float32"))
begin_c = relay.const(begin, dtype="int32")
end_c = relay.const(end, dtype="int32")
if strides:
strides_c = relay.const(strides, dtype="int32")
z = relay.strided_set(x, v, begin=begin_c, end=end_c, strides=strides_c)
else:
z = relay.strided_set(x, v, begin=begin_c, end=end_c)
func = relay.Function([x, v], z)
func = run_infer_type(func)
text = func.astext()
assert "strided_set" in text
print(text)
assert func.body.checked_type == relay.ty.TensorType(dshape, "float32")
if not test_ref:
return
x_data = np.random.uniform(size=dshape).astype("float32")
v_data = np.random.uniform(size=vshape).astype("float32")
ref_res = tvm.topi.testing.strided_set_python(x_data, v_data, begin, end, strides)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, v_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
verify((3, 4, 16), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3))
verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))
verify((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2], (1, 2, 2))
verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 4], None, (2, 3, 3))
verify((3, 4, 3), [1, 1], [4, 4, 3], None, (2, 3, 3))
if __name__ == "__main__":
tvm.testing.main()
"""Support level5 operator test cases."""
import math
import platform
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te
from tvm.relay.testing import run_infer_type
executor_kind = tvm.testing.parameter("graph", "vm")
def test_resize1d_infer_type():
n, c, w = te.size_var("n"), te.size_var("c"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, w), "int8"))
tw = te.var("tw")
z = relay.image.resize1d(x, (tw,))
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, c, tw), "int8")
x = relay.var("x", relay.TensorType((n, c, w), "int8"))
z = relay.image.resize1d(x, (200,), None, "NCW", "linear", "align_corners")
assert "size=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, c, 200), "int8")
class TestResize1D:
interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic")
coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
layout = tvm.testing.parameter("NWC", "NCW")
dshape, scale = tvm.testing.parameters(
((1, 4, 4), 2),
((2, 8, 17), 3),
((2, 8, 17), 3),
((3, 4, 5), 5),
)
def test_resize(
self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans
):
target_kind = tvm.target.Target(target).kind.name
if (
target_kind == "vulkan"
and dshape == (3, 4, 5)
and scale == 5
and interpolate_method == "nearest_neighbor"
and coord_trans == "align_corners"
):
pytest.xfail("Known failing case for these parameters")
if layout == "NWC":
size = (dshape[1] * scale,)
else:
size = (dshape[2] * scale,)
x_data = np.random.uniform(size=dshape).astype("float32")
ref_res = tvm.topi.testing.resize1d_python(
x_data, (scale,), layout, interpolate_method, coord_trans
)
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.image.resize1d(
x, size, None, layout, interpolate_method, coordinate_transformation_mode=coord_trans
)
assert "size=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
func = relay.Function([x], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4)
def test_resize2d_infer_type():
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
th, tw = te.var("th"), te.var("tw")
z = relay.image.resize2d(x, (th, tw))
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, c, th, tw), "int8")
x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
z = relay.image.resize2d(x, (100, 200), None, "NCHW", "linear", "align_corners")
assert "size=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, c, 100, 200), "int8")
class TestResize2D:
interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic")
coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
layout = tvm.testing.parameter("NHWC", "NCHW")
dshape, scale = tvm.testing.parameters(
((1, 4, 4, 4), 2),
((2, 8, 17, 20), 3),
((2, 8, 17, 20), 3),
((3, 4, 5, 6), 5),
)
def test_resize(
self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans
):
target_kind = tvm.target.Target(target).kind.name
if (
target_kind == "vulkan"
and dshape == (3, 4, 5, 6)
and scale == 5
and interpolate_method == "nearest_neighbor"
and coord_trans == "align_corners"
):
pytest.xfail("Known failing case for these parameters")
if layout == "NHWC":
size = (dshape[1] * scale, dshape[2] * scale)
else:
size = (dshape[2] * scale, dshape[3] * scale)
x_data = np.random.uniform(size=dshape).astype("float32")
ref_res = tvm.topi.testing.resize2d_python(
x_data, (scale, scale), layout, interpolate_method, coord_trans
)
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.image.resize2d(
x, size, None, layout, interpolate_method, coordinate_transformation_mode=coord_trans
)
assert "size=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
func = relay.Function([x], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4)
def test_resize3d_infer_type():
n, c, d, h, w = (
te.size_var("n"),
te.size_var("c"),
te.size_var("d"),
te.size_var("h"),
te.size_var("w"),
)
x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
td, th, tw = te.var("td"), te.var("th"), te.var("tw")
z = relay.image.resize3d(x, (td, th, tw))
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, c, td, th, tw), "int8")
x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
z = relay.image.resize3d(x, (10, 10, 20), None, "NCDHW", "linear", "align_corners")
assert "size=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, c, 10, 10, 20), "int8")
class TestResize3D:
interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic")
coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
layout = tvm.testing.parameter("NDHWC", "NCDHW")
dshape, scale = tvm.testing.parameters(
((1, 4, 4, 4, 4), 2),
)
def test_resize(
self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans
):
if layout == "NDHWC":
size = (dshape[1] * scale, dshape[2] * scale, dshape[3] * scale)
else:
size = (dshape[2] * scale, dshape[3] * scale, dshape[4] * scale)
x_data = np.random.uniform(size=dshape).astype("float32")
ref_res = tvm.topi.testing.resize3d_python(
x_data, (scale, scale, scale), layout, interpolate_method, coord_trans
)
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.image.resize3d(x, size, None, layout, interpolate_method, coord_trans)
assert "size=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
func = relay.Function([x], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
class TestCropAndResize:
interpolate_method = tvm.testing.parameter("bilinear", "nearest_neighbor")
layout = tvm.testing.parameter("NHWC", "NCHW")
@pytest.mark.skipif(
platform.machine() == "aarch64",
reason="Currently failing on AArch64 - see https:
)
def test_crop_and_resize(self, target, dev, executor_kind, layout, interpolate_method):
target_kind = tvm.target.Target(target).kind.name
if (
target_kind == "vulkan"
and layout == "NHWC"
and interpolate_method == "nearest_neighbor"
):
pytest.xfail("Known failing case for these parameters")
extrapolation_value = 0.0
if layout == "NHWC":
img_shape = (10, 224, 224, 3)
boxes = np.array([[0.1, 0.2, 0.8, 0.7], [0.2, 0, 1, 0.6]]).astype("float32")
box_indices = np.array([1, 0]).astype("int32")
crop_size = np.array([20, 30]).astype("int32")
elif layout == "NCHW":
img_shape = (5, 3, 255, 255)
boxes = np.array([[0, 0, 1, 1], [0.2, 0.1, 1, 0.9]]).astype("float32")
box_indices = np.array([0, 1]).astype("int32")
crop_size = np.array([30, 30]).astype("int32")
else:
raise ValueError(f"Unknown layout: {layout}")
image_data = np.random.uniform(size=img_shape).astype("float32")
ref_res = tvm.topi.testing.crop_and_resize_python(
image_data,
boxes,
box_indices,
crop_size,
layout,
interpolate_method,
extrapolation_value,
)
img = relay.var("img", relay.TensorType(img_shape, "float32"))
bx = relay.var("bx", relay.TensorType(boxes.shape, "float32"))
bx_idx = relay.var("bx_idx", relay.TensorType(box_indices.shape, "int32"))
z = relay.image.crop_and_resize(
img, bx, bx_idx, list(crop_size), layout, interpolate_method, extrapolation_value
)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
func = relay.Function([img, bx, bx_idx], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
image_data, boxes, box_indices
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-04)
@tvm.testing.uses_gpu
def test_multibox_prior(executor_kind):
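# Anchor-box reference: each feature-map cell (i, j) emits
# num_sizes + num_ratios - 1 boxes centered at ((i + offset_h) * steps_h,
# (j + offset_w) * steps_w); the first num_sizes boxes vary the size, the
# remainder vary the aspect ratio via sqrt(ratio) against the first size.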
def get_ref_result(
dshape, sizes=(1.0,), ratios=(1.0,), steps=(-1.0, -1.0), offsets=(0.5, 0.5), clip=True
):
in_height = dshape[2]
in_width = dshape[3]
num_sizes = len(sizes)
num_ratios = len(ratios)
size_ratio_concat = sizes + ratios
steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height
steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width
offset_h = offsets[0]
offset_w = offsets[1]
oshape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)
dtype = "float32"
np_out = np.zeros(oshape).astype(dtype)
for i in range(in_height):
center_h = (i + offset_h) * steps_h
for j in range(in_width):
center_w = (j + offset_w) * steps_w
for k in range(num_sizes + num_ratios - 1):
w = (
size_ratio_concat[k] * in_height / in_width / 2.0
if k < num_sizes
else size_ratio_concat[0]
* in_height
/ in_width
* math.sqrt(size_ratio_concat[k + 1])
/ 2.0
)
h = (
size_ratio_concat[k] / 2.0
if k < num_sizes
else size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0
)
count = (
i * in_width * (num_sizes + num_ratios - 1)
+ j * (num_sizes + num_ratios - 1)
+ k
)
np_out[0][count][0] = center_w - w
np_out[0][count][1] = center_h - h
np_out[0][count][2] = center_w + w
np_out[0][count][3] = center_h + h
if clip:
np_out = np.clip(np_out, 0, 1)
return np_out
def verify_multibox_prior(
x,
dshape,
ref_res,
sizes=(1.0,),
ratios=(1.0,),
steps=(-1.0, -1.0),
offsets=(0.5, 0.5),
clip=True,
check_size=False,
check_type_only=False,
):
z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip)
zz = run_infer_type(z)
if check_size:
assert "sizes=" in z.astext()
assert zz.checked_type == relay.TensorType(
(1, dshape[2] * dshape[3] * (len(sizes) + len(ratios) - 1), 4), "float32"
)
if check_type_only:
return
data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
func = relay.Function([x], z)
func = run_infer_type(func)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
sizes = (0.3, 1.5, 0.7)
ratios = (1.3, 2.4)
steps = (2.0, 1.5)
offsets = (0.2, 0.3)
dshape = (1, 3, 56, 56)
ref_res = get_ref_result(dshape, sizes, ratios, steps, offsets)
x = relay.var("x", relay.TensorType(dshape, "float32"))
verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True)
y = relay.var("y", relay.TensorType((te.size_var("n"), 3, 56, 56), "float32"))
verify_multibox_prior(
x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True, check_type_only=True
)
dshape = (1, 24, 32, 32)
ref_res = get_ref_result(dshape, clip=False)
x = relay.var("x", relay.TensorType(dshape, "float32"))
verify_multibox_prior(x, dshape, ref_res, clip=False)
y = relay.var("y", relay.TensorType((te.size_var("n"), 24, 32, 32), "float32"))
verify_multibox_prior(x, dshape, ref_res, clip=False, check_type_only=True)
@tvm.testing.uses_gpu
def test_get_valid_counts():
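# get_valid_counts compacts boxes whose score exceeds score_threshold (and, if
# id_index >= 0, whose class id is non-negative) to the front of each batch,
# returning the valid count, the compacted boxes, and their original indices,
# with the remaining slots padded with -1.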
def verify_get_valid_counts(dshape, score_threshold, id_index, score_index):
dtype = "float32"
batch_size, num_anchor, elem_length = dshape
np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)
np_out1 = np.zeros(shape=(batch_size,))
np_out2 = np.zeros(shape=dshape).astype(dtype)
np_out3 = np.zeros(shape=(batch_size, num_anchor))
for i in range(batch_size):
np_out1[i] = 0
inter_idx = 0
for j in range(num_anchor):
score = np_data[i, j, score_index]
if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):
for k in range(elem_length):
np_out2[i, inter_idx, k] = np_data[i, j, k]
np_out1[i] += 1
np_out3[i, inter_idx] = j
inter_idx += 1
if j >= np_out1[i]:
for k in range(elem_length):
np_out2[i, j, k] = -1.0
np_out3[i, j] = -1
x = relay.var("x", relay.ty.TensorType(dshape, dtype))
z = relay.vision.get_valid_counts(x, score_threshold, id_index, score_index)
assert "score_threshold" in z.astext()
func = relay.Function([x], z.astuple())
func = run_infer_type(func)
for target, dev in tvm.testing.enabled_targets():
out = relay.create_executor("vm", device=dev, target=target).evaluate(func)(np_data)
tvm.testing.assert_allclose(out[0].numpy(), np_out1, rtol=1e-3, atol=1e-04)
tvm.testing.assert_allclose(out[1].numpy(), np_out2, rtol=1e-3, atol=1e-04)
tvm.testing.assert_allclose(out[2].numpy(), np_out3, rtol=1e-3, atol=1e-04)
verify_get_valid_counts((1, 2500, 6), 0, 0, 1)
verify_get_valid_counts((1, 2500, 5), -1, -1, 0)
verify_get_valid_counts((3, 1000, 6), 0.55, 1, 0)
verify_get_valid_counts((16, 500, 5), 0.95, -1, 0)
@tvm.testing.uses_gpu
def test_non_max_suppression(executor_kind):
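# Box rows follow the [class_id, score, x1, y1, x2, y2] layout (plus extra
# trailing features in the 10-element case further down); NMS keeps boxes in
# descending score order, suppresses overlaps above iou_threshold, and fills
# suppressed rows with -1.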
def verify_nms(
x0_data,
x1_data,
x2_data,
x3_data,
dshape,
ref_res,
ref_indices_res,
iou_threshold=0.5,
force_suppress=False,
top_k=-1,
check_type_only=False,
):
x0 = relay.var("x0", relay.ty.TensorType(dshape, "float32"))
x1 = relay.var("x1", relay.ty.TensorType((dshape[0],), "int32"))
x2 = relay.var("x2", relay.ty.TensorType((dshape[0], dshape[1]), "int32"))
x3 = relay.var("x3", relay.ty.TensorType((), "int32"))
z = relay.vision.non_max_suppression(
x0,
x1,
x2,
x3,
iou_threshold=iou_threshold,
force_suppress=force_suppress,
top_k=top_k,
return_indices=False,
)
z_indices = relay.vision.non_max_suppression(
x0,
x1,
x2,
x3,
iou_threshold=iou_threshold,
force_suppress=force_suppress,
top_k=top_k,
return_indices=True,
)
if isinstance(z_indices, relay.expr.TupleWrapper):
z_indices = z_indices.astuple()
zz = run_infer_type(z)
zz_indices = run_infer_type(z_indices)
assert zz.checked_type == relay.ty.TensorType(dshape, "float32")
assert zz_indices.checked_type == relay.ty.TupleType(
[
relay.ty.TensorType((dshape[0], dshape[1]), "int32"),
relay.ty.TensorType((dshape[0], 1), "int32"),
]
)
if check_type_only:
return
func = relay.Function([x0, x1, x2, x3], z)
func = run_infer_type(func)
func_indices = relay.Function([x0, x1, x2, x3], z_indices)
func_indices = run_infer_type(func_indices)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x0_data, x1_data, x2_data, x3_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
op_indices_res = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(func_indices)(x0_data, x1_data, x2_data, x3_data)
tvm.testing.assert_allclose(op_indices_res[0].numpy(), ref_indices_res, rtol=1e-5)
np_data = np.array(
[
[
[0, 0.8, 1, 20, 25, 45],
[1, 0.7, 30, 60, 50, 80],
[0, 0.4, 4, 21, 19, 40],
[2, 0.9, 35, 61, 52, 79],
[1, 0.5, 100, 60, 70, 110],
]
]
).astype("float32")
np_valid_count = np.array([4]).astype("int32")
np_indices = np.array([[0, 1, 3, 4, -1]]).astype("int32")
np_max_output_size = -1
np_result = np.array(
[
[
[2, 0.9, 35, 61, 52, 79],
[0, 0.8, 1, 20, 25, 45],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
]
]
)
np_indices_result = np.array([[4, 0, -1, -1, -1]])
num_anchors = 5
dshape = (te.size_var("n"), num_anchors, 6)
verify_nms(
np_data,
np_valid_count,
np_indices,
np_max_output_size,
dshape,
np_result,
np_indices_result,
force_suppress=True,
top_k=2,
check_type_only=True,
)
dshape = (1, num_anchors, 6)
verify_nms(
np_data,
np_valid_count,
np_indices,
np_max_output_size,
dshape,
np_result,
np_indices_result,
force_suppress=True,
top_k=2,
check_type_only=False,
)
np_result = np.array(
[
[
[2, 0.9, 35, 61, 52, 79],
[0, 0.8, 1, 20, 25, 45],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
]
]
)
np_indices_result = np.array([[4, 0, -1, -1, -1]])
np_max_output_size = 2
dshape = (te.size_var("n"), num_anchors, 6)
verify_nms(
np_data,
np_valid_count,
np_indices,
np_max_output_size,
dshape,
np_result,
np_indices_result,
check_type_only=True,
)
dshape = (1, num_anchors, 6)
verify_nms(
np_data,
np_valid_count,
np_indices,
np_max_output_size,
dshape,
np_result,
np_indices_result,
top_k=2,
)
np_data = np.array(
[
[
[0, 0.8, 1, 20, 25, 45, 1, 2, 3, 4],
[1, 0.7, 30, 60, 50, 80, 5, 6, 7, 8],
[0, 0.4, 4, 21, 19, 40, 9, 10, 11, 12],
[2, 0.9, 35, 61, 52, 79, 13, 14, 15, 16],
[1, 0.5, 100, 60, 70, 110, 17, 18, 19, 20],
]
]
).astype("float32")
np_result = np.array(
[
[
[2, 0.9, 35, 61, 52, 79, 13, 14, 15, 16],
[0, 0.8, 1, 20, 25, 45, 1, 2, 3, 4],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
]
]
)
dshape = (1, 5, 10)
verify_nms(
np_data,
np_valid_count,
np_indices,
np_max_output_size,
dshape,
np_result,
np_indices_result,
force_suppress=True,
top_k=2,
check_type_only=False,
)
@tvm.testing.uses_gpu
def test_multibox_transform_loc(executor_kind):
def test_default_value():
num_anchors = 3
num_classes = 3
np_cls_prob = np.array([[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]]).astype(
"float32"
)
np_loc_preds = np.array(
[[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]]
).astype("float32")
np_anchors = np.array(
[[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]]
).astype("float32")
expected_np_out = np.array(
[
[
[1, 0.69999999, 0, 0, 0.10818365, 0.10008108],
[0, 0.44999999, 1, 1, 1, 1],
[0, 0.30000001, 0, 0, 0.22903419, 0.20435292],
]
]
)
cls_prob = relay.var(
"cls_prob", relay.ty.TensorType((1, num_anchors, num_classes), "float32")
)
loc_pred = relay.var("loc_pred", relay.ty.TensorType((1, num_anchors * 4), "float32"))
anchors = relay.var("anchors", relay.ty.TensorType((1, num_anchors, 4), "float32"))
mtl = relay.vision.multibox_transform_loc(
cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors
)
ret = run_infer_type(mtl.astuple())
ref_type = relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((1, num_anchors, 6), "float32"),
relay.ty.TensorType((1,), "int"),
]
)
)
assert ret.checked_type == ref_type
nms = relay.vision.non_max_suppression(mtl[0], mtl[1], mtl[0], return_indices=False)
func = relay.Function([cls_prob, loc_pred, anchors], nms)
func = run_infer_type(func)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
np_cls_prob, np_loc_preds, np_anchors
)
tvm.testing.assert_allclose(op_res.numpy(), expected_np_out, rtol=1e-5)
def test_threshold():
num_anchors = 5
num_classes = 5
n = te.size_var("n")
cls_prob = relay.var(
"cls_prob", relay.ty.TensorType((n, num_anchors, num_classes), "float32")
)
loc_pred = relay.var("loc_pred", relay.ty.TensorType((n, num_anchors * 4), "float32"))
anchors = relay.var("anchors", relay.ty.TensorType((1, num_anchors, 4), "float32"))
threshold = 0.02
variances = (0.2, 0.2, 0.3, 0.3)
ret = relay.vision.multibox_transform_loc(
cls_prob=cls_prob,
loc_pred=loc_pred,
anchor=anchors,
threshold=threshold,
variances=variances,
)
ret = run_infer_type(ret.astuple())
ref_type = relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((n, num_anchors, 6), "float32"),
relay.ty.TensorType((n,), "int"),
]
)
)
assert ret.checked_type == ref_type
test_default_value()
test_threshold()
@tvm.testing.uses_gpu
def test_roi_align(executor_kind):
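# roi_align expects each ROI row as [batch_index, x1, y1, x2, y2], which is
# why column 0 of np_rois is overwritten with valid batch indices below.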
def verify_roi_align(
data_shape,
rois_shape,
channel,
in_size,
pooled_size,
spatial_scale,
sample_ratio,
mode,
layout,
ref_func,
):
data = relay.var("data", relay.ty.TensorType(data_shape, "float32"))
rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32"))
z = relay.vision.roi_align(
data,
rois,
pooled_size=(pooled_size, pooled_size),
spatial_scale=spatial_scale,
sample_ratio=sample_ratio,
mode=mode,
layout=layout,
)
zz = run_infer_type(z)
num_roi = rois_shape[0]
if layout == "NCHW":
assert zz.checked_type == relay.ty.TensorType(
(num_roi, channel, pooled_size, pooled_size), "float32"
)
else:
assert zz.checked_type == relay.ty.TensorType(
(num_roi, pooled_size, pooled_size, channel), "float32"
)
func = relay.Function([data, rois], z)
func = run_infer_type(func)
np_data = np.random.uniform(size=data_shape).astype("float32")
np_rois = np.random.uniform(size=rois_shape).astype("float32") * in_size
np_rois[:, 0] = np.random.randint(low=0, high=data_shape[0], size=num_roi)
ref_res = ref_func(
np_data,
np_rois,
pooled_size=pooled_size,
spatial_scale=spatial_scale,
sample_ratio=sample_ratio,
mode=mode,
)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
np_data, np_rois
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, atol=1e-6, rtol=1e-3)
def verify_roi_align_nchw(
data_shape, rois_shape, pooled_size, spatial_scale, sample_ratio, mode
):
_, channel, in_size, _ = data_shape
return verify_roi_align(
data_shape,
rois_shape,
channel,
in_size,
pooled_size,
spatial_scale,
sample_ratio,
mode,
"NCHW",
tvm.topi.testing.roi_align_nchw_python,
)
def verify_roi_align_nhwc(
data_shape, rois_shape, pooled_size, spatial_scale, sample_ratio, mode
):
_, in_size, _, channel = data_shape
return verify_roi_align(
data_shape,
rois_shape,
channel,
in_size,
pooled_size,
spatial_scale,
sample_ratio,
mode,
"NHWC",
tvm.topi.testing.roi_align_nhwc_python,
)
verify_roi_align_nchw(
(1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="avg"
)
verify_roi_align_nchw(
(4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="avg"
)
verify_roi_align_nchw(
(1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="max"
)
verify_roi_align_nchw(
(4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="max"
)
verify_roi_align_nhwc(
(1, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="avg"
)
verify_roi_align_nhwc(
(4, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="avg"
)
verify_roi_align_nhwc(
(1, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="max"
)
verify_roi_align_nhwc(
(4, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="max"
)
@tvm.testing.uses_gpu
def test_roi_pool(executor_kind):
def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale):
data = relay.var("data", relay.ty.TensorType(data_shape, "float32"))
rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32"))
z = relay.vision.roi_pool(
data,
rois,
pooled_size=(pooled_size, pooled_size),
spatial_scale=spatial_scale,
layout="NCHW",
)
zz = run_infer_type(z)
batch, channel, in_size, _ = data_shape
num_roi = rois_shape[0]
assert zz.checked_type == relay.ty.TensorType(
(num_roi, channel, pooled_size, pooled_size), "float32"
)
func = relay.Function([data, rois], z)
func = run_infer_type(func)
np_data = np.random.uniform(size=data_shape).astype("float32")
np_rois = np.random.uniform(size=rois_shape).astype("float32") * in_size
np_rois[:, 0] = np.random.randint(low=0, high=batch, size=num_roi).astype("float32")
ref_res = tvm.topi.testing.roi_pool_nchw_python(
np_data, np_rois, pooled_size=pooled_size, spatial_scale=spatial_scale
)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
np_data, np_rois
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)
verify_roi_pool((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0)
verify_roi_pool((4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5)
@tvm.testing.uses_gpu
def test_proposal(executor_kind):
def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs):
cls_prob = relay.var("cls_prob", relay.ty.TensorType(np_cls_prob.shape, "float32"))
bbox_pred = relay.var("bbox_pred", relay.ty.TensorType(np_bbox_pred.shape, "float32"))
im_info = relay.var("im_info", relay.ty.TensorType(np_im_info.shape, "float32"))
z = relay.vision.proposal(cls_prob, bbox_pred, im_info, **attrs)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(np_out.shape, "float32")
func = relay.Function([cls_prob, bbox_pred, im_info], z)
func = run_infer_type(func)
for target in ["llvm", "cuda"]:
if not tvm.testing.device_enabled(target):
print("Skip test because %s is not enabled." % target)
continue
dev = tvm.device(target, 0)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
np_cls_prob, np_bbox_pred, np_im_info
)
tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-4)
attrs = {
"scales": (0.5,),
"ratios": (0.5,),
"feature_stride": 16,
"iou_loss": False,
"rpn_min_size": 16,
"threshold": 0.7,
"rpn_pre_nms_top_n": 200,
"rpn_post_nms_top_n": 4,
}
np_cls_prob = np.array(
[
[
[[0.3, 0.6, 0.2], [0.4, 0.7, 0.5], [0.1, 0.4, 0.3]],
[[0.7, 0.5, 0.3], [0.6, 0.4, 0.8], [0.9, 0.2, 0.5]],
]
],
dtype="float32",
)
np_bbox_pred = np.array(
[
[
[[0.5, 1.0, 0.6], [0.8, 1.2, 2.0], [0.9, 1.0, 0.8]],
[[0.5, 1.0, 0.7], [0.8, 1.2, 1.6], [2.1, 1.5, 0.7]],
[[1.0, 0.5, 0.7], [1.5, 0.9, 1.6], [1.4, 1.5, 0.8]],
[[1.0, 0.5, 0.6], [1.5, 0.9, 2.0], [1.8, 1.0, 0.9]],
]
],
dtype="float32",
)
np_im_info = np.array([[48.0, 48.0, 1.0]], dtype="float32")
np_out = np.array(
[
[0.0, 0.0, 2.8451548, 28.38012, 18.154846],
[0.0, 0.0, 15.354933, 41.96971, 41.245064],
[0.0, 18.019852, 1.0538368, 51.98015, 25.946163],
[0.0, 27.320923, -1.266357, 55.0, 24.666357],
],
dtype="float32",
)
verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)
np_out = np.array(
[
[0.0, -5.25, -2.5, 21.75, 19.0],
[0.0, 11.25, -2.0, 37.25, 18.5],
[0.0, 26.849998, -2.3000002, 53.45, 18.6],
[0.0, -4.95, 13.799999, 22.25, 35.5],
],
dtype="float32",
)
attrs["iou_loss"] = True
verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)
def test_yolo_reorg_infer_shape():
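# yolo_reorg with stride s reshapes (n, c, h, w) to (n, c * s * s, h // s, w // s);
# the symbolic-shape case therefore uses tir.indexdiv for the spatial dims.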
def verify_yolo_reorg(shape, stride, out_shape):
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.vision.yolo_reorg(x, stride=stride)
zz = run_infer_type(z)
assert "stride=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(out_shape, "float32")
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
idxd = tvm.tir.indexdiv
verify_yolo_reorg((n, c, 20, 20), 10, (n, c * 10 * 10, 2, 2))
verify_yolo_reorg((n, c, h, w), 2, (n, c * 2 * 2, idxd(h, 2), idxd(w, 2)))
@tvm.testing.uses_gpu
def test_yolo_reorg(executor_kind):
def verify_yolo_reorg(shape, stride):
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = tvm.topi.testing.reorg_python(x_data, stride)
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.vision.yolo_reorg(x, stride=stride)
zz = run_infer_type(z)
assert "stride=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32")
func = relay.Function([x], z)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_yolo_reorg((1, 100, 20, 20), 10)
verify_yolo_reorg((1, 4, 6, 6), 2)
class TestDeformableConv2D:
batch, in_channel, size, out_channel, deformable_groups = tvm.testing.parameters(
(1, 4, 16, 4, 4),
(2, 4, 16, 4, 1),
)
kernel_size = tvm.testing.parameter((3, 3))
groups = tvm.testing.parameter(1, 2)
layout = tvm.testing.parameter("NCHW", "NHWC" |
)
dtype = tvm.testing.parameter("float32")
@tvm.testing.fixture
def data_shape(self, layout, batch, in_channel, size):
if layout == "NCHW":
return (batch, in_channel, size, size)
elif layout == "NHWC":
return (batch, size, size, in_channel)
@tvm.testing.fixture
def kernel_shape(self, layout, in_channel, out_channel, groups, kernel_size):
if layout == "NCHW":
return (out_channel, in_channel
elif layout == "NHWC":
return (kernel_size[0], kernel_size[1], in_channel
@tvm.testing.fixture
def out_shape(self, layout, batch, out_channel, size):
if layout == "NCHW":
return (batch, out_channel, size, size)
elif layout == "NHWC":
return (batch, size, size, out_channel)
@tvm.testing.fixture
def offset_shape(self, layout, batch, kernel_size, deformable_groups, out_shape):
if layout == "NCHW":
return (
batch,
2 * kernel_size[0] * kernel_size[1] * deformable_groups,
out_shape[2],
out_shape[3],
)
elif layout == "NHWC":
return (
batch,
out_shape[1],
out_shape[2],
2 * kernel_size[0] * kernel_size[1] * deformable_groups,
)
@tvm.testing.fixture
def kernel_layout(self, layout):
return {"NCHW": "OIHW", "NHWC": "HWIO"}[layout]
@tvm.testing.fixture
def relay_setup(
self,
dtype,
data_shape,
layout,
kernel_layout,
kernel_size,
deformable_groups,
groups,
out_channel,
):
data = relay.var("data", shape=data_shape, dtype=dtype)
offset = relay.var("offset", dtype=dtype)
kernel = relay.var("kernel", dtype=dtype)
expr = relay.nn.deformable_conv2d(
data,
offset,
kernel,
strides=(1, 1),
padding=(1, 1),
dilation=(1, 1),
data_layout=layout,
kernel_layout=kernel_layout,
kernel_size=kernel_size,
deformable_groups=deformable_groups,
groups=groups,
channels=out_channel,
)
func = relay.Function([data, offset, kernel], expr)
return expr, func
def test_infer_type(self, relay_setup, out_shape, offset_shape, kernel_shape):
expr, func = relay_setup
yy = run_infer_type(expr)
assert yy.checked_type == relay.TensorType(out_shape), yy.checked_type
assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type
assert yy.args[2].checked_type == relay.TensorType(kernel_shape), yy.args[2].checked_type
@pytest.mark.parametrize("groups", [1])
def test_run(
self,
target,
dev,
dtype,
executor_kind,
data_shape,
offset_shape,
kernel_shape,
relay_setup,
deformable_groups,
groups,
layout,
):
target = tvm.target.Target(target)
if layout == "NHWC" and target.kind.name != "llvm":
pytest.xfail("Can only run NHWC layout on llvm")
expr, func = relay_setup
data = np.random.uniform(size=data_shape).astype(dtype)
offset = np.random.uniform(size=offset_shape).astype(dtype)
kernel = np.random.uniform(size=kernel_shape).astype(dtype)
if layout == "NCHW":
ref_res = tvm.topi.testing.deformable_conv2d_nchw_python(
data,
offset,
kernel,
stride=(1, 1),
padding=(1, 1),
dilation=(1, 1),
deformable_groups=deformable_groups,
groups=groups,
)
else:
ref_res = tvm.topi.testing.deformable_conv2d_nhwc_python(
data,
offset,
kernel,
stride=(1, 1),
padding=(1, 1), |
dilation=(1, 1),
deformable_groups=deformable_groups,
groups=groups,
)
op_res1 = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data, offset, kernel
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_depth_to_space(executor_kind):
def verify_depth_to_space(dshape, block_size, layout, mode):
if layout == "NHWC":
out_shape = [
dshape[0],
dshape[1] * block_size,
dshape[2] * block_size,
dshape[3] / (block_size * block_size),
]
else:
out_shape = [
dshape[0],
dshape[1] / (block_size * block_size),
dshape[2] * block_size,
dshape[3] * block_size,
]
x_data = np.random.uniform(size=dshape).astype("float32")
if layout == "NHWC":
x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
ref_res = tvm.topi.testing.depth_to_space_python(x_data, block_size, mode=mode)
if layout == "NHWC":
x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.nn.depth_to_space(x, block_size, layout, mode)
assert "block_size=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
func = relay.Function([x], z)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)
for layout in ["NHWC", "NCHW"]:
for mode in ["DCR", "CDR"]:
verify_depth_to_space((1, 4, 4, 4), 2, layout, mode)
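# tvm.topi.testing.depth_to_space_python only accepts NCHW, so the NHWC case
# above transposes the input into NCHW for the reference computation, then
# transposes input and reference back to NHWC (space_to_depth below does the same).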
@tvm.testing.uses_gpu
def test_space_to_depth(executor_kind):
def verify_space_to_depth(dshape, block_size, layout):
if layout == "NHWC":
out_shape = [
dshape[0],
dshape[1] / block_size,
dshape[2] / block_size,
dshape[3] * (block_size * block_size),
]
else:
out_shape = [
dshape[0],
dshape[1] * (block_size * block_size),
dshape[2] / block_size,
dshape[3] / block_size,
]
x_data = np.random.uniform(size=dshape).astype("float32")
if layout == "NHWC":
x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
ref_res = tvm.topi.testing.space_to_depth_python(x_data, block_size)
if layout == "NHWC":
x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.nn.space_to_depth(x, block_size, layout)
assert "block_size=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
func = relay.Function([x], z)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)
for layout in ["NHWC", "NCHW"]:
verify_space_to_depth((1, 4, 4, 4), 2, layout)
def test_dilation2d_infer_type():
n, h, w, c = te.var("n"), 224, 224, 10
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
kc, kh, kw = 10, 8, 8
w = relay.var("w", relay.ty.TensorType((kc, kw, kh), "float32"))
y = relay.image.dilation2d(
x,
w,
strides=[1, 1, 1, 1],
dilations=[1, 1, 1, 1],
padding=[0, 0, 0, 0], |
)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 217, 217), "float32")
class TestDilation2DRun:
data_layout, kernel_layout = tvm.testing.parameters(("NCHW", "IHW"), ("NHWC", "HWI"))
dtype = tvm.testing.parameter("float32")
config = tvm.testing.parameter(
dict(
image=[[[[0.1], [0.2]], [[0.3], [0.4]]]],
kernel=[[[0.4], [0.3]], [[0.1], [0.0]]],
out=[[[[0.5]]]],
),
dict(
image=[[[[0.1], [0.2]], [[0.3], [0.4]]]],
kernel=[[[0.4], [0.3]], [[0.1], [0.0]]],
out=[[[[0.5], [0.6]], [[0.7], [0.8]]]],
padding=[0, 0, 1, 1],
),
dict(
image=[[[[0.1, 0.2, 0.0], [0.2, 0.3, 0.1]], [[0.3, 0.4, 0.2], [0.4, 0.5, 0.3]]]],
kernel=[[[0.4, 0.5, 0.3], [0.3, 0.4, 0.2]], [[0.1, 0.2, 0.0], [0.0, 0.1, -0.1]]],
out=[[[[0.5, 0.7, 0.3], [0.6, 0.8, 0.4]], [[0.7, 0.9, 0.5], [0.8, 1.0, 0.6]]]],
padding=[0, 0, 1, 1],
),
dict(
image=[[[[0.1], [0.2]], [[0.3], [0.4]]], [[[0.2], [0.3]], [[0.4], [0.5]]]],
kernel=[[[0.4], [0.3]], [[0.1], [0.0]]],
out=[[[[0.5], [0.6]], [[0.7], [0.8]]], [[[0.6], [0.7]], [[0.8], [0.9]]]],
padding=[0, 0, 1, 1],
),
dict(
image=[[[[0.1], [0.2]], [[0.3], [0.4]]]],
kernel=[[[0.4], [0.3]]],
out=[[[[0.5]], [[0.7]]]],
),
dict(
image=[[[[0.1], [0.2], [0.3]], [[0.4], [0.5], [0.6]], [[0.7], [0.8], [0.9]]]],
kernel=[[[0.4], [0.3]], [[0.1], [0.2]]],
out=[[[[0.7], [0.8], [0.6]], [[1.0], [1.1], [0.9]], [[0.8], [0.9], [0.9]]]],
padding=[1, 1],
dilations=[2, 2],
),
dict(
image=[
[
[[0.1], [0.2], [0.3], [0.4]],
[[0.5], [0.6], [0.7], [0.8]],
[[0.9], [1.0], [1.1], [1.2]],
]
],
kernel=[[[0.4], [0.3]], [[0.1], [0.2]]],
out=[[[[0.8], [1.0]], [[1.2], [1.4]]]],
strides=[1, 2],
),
)
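# These configs exercise grayscale morphological dilation:
# out[y, x, c] = max_{i, j}(image[y*sh + i*dh, x*sw + j*dw, c] + kernel[i, j, c]).
# For the first config: max(0.1 + 0.4, 0.2 + 0.3, 0.3 + 0.1, 0.4 + 0.0) = 0.5.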
@tvm.testing.fixture
def test_case(self, config, data_layout, dtype):
indata = np.array(config["image"], dtype=dtype)
kernel = np.array(config["kernel"], dtype=dtype)
out = np.array(config["out"], dtype=dtype)
if data_layout == "NHWC":
pass
elif data_layout == "NCHW":
indata = indata.transpose([0, 3, 1, 2])
kernel = kernel.transpose([2, 0, 1])
out = out.transpose([0, 3, 1, 2])
else:
raise ValueError(f"Unsupported layout '{data_layout}'")
return indata, kernel, out
@tvm.testing.parametrize_targets("llvm")
def test_dilation2d(
self,
target,
dev,
test_case,
dtype,
config,
data_layout,
kernel_layout,
):
strides = config.get("strides", [1, 1])
padding = config.get("padding", [0, 0])
dilations = config.get("dilations", [1, 1])
indata, kernel, out = test_case
x = relay.var("x", shape=indata.shape, dtype=dtype)
w = relay.var("w", shape=kernel.shape, dtype=dtype)
y = relay.image.dilation2d(
x,
w,
strides=strides,
dilations=dilations,
padding=padding,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
func = relay.Function([x, w], y)
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
indata, kernel
)
tvm.testing.assert_allclose(op_res.numpy(), out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_affine_grid(executor_kind):
def verify_affine_grid(num_batch, target_shape):
dtype = "float32"
data_shape = (num_batch, 2, 3)
data = relay.var("data", relay.ty.TensorType(data_shape, dtype))
y = relay.image.affine_grid(data, target_shape) |
yy = run_infer_type(y)
assert yy.checked_type == relay.ty.TensorType(
(num_batch, len(target_shape), *target_shape), dtype
)
func = relay.Function([data], y)
data_np = np.random.uniform(size=data_shape).astype(dtype)
ref_res = tvm.topi.testing.affine_grid_python(data_np, target_shape)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data_np
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5, atol=1e-5)
verify_affine_grid(1, (16, 32))
verify_affine_grid(4, (16, 32))
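# affine_grid turns a batch of (2, 3) affine matrices into a sampling grid of
# target coordinates with shape (num_batch, 2, *target_shape), matching the
# checked_type assertion above; the grid is typically consumed by grid_sample.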
@tvm.testing.uses_gpu
def test_grid_sample(executor_kind):
def verify_grid_sample(
data_shape, grid_shape, method="bilinear", padding_mode="zeros", align_corners=True
):
dtype = "float32"
data = relay.var("data", relay.ty.TensorType(data_shape, dtype))
grid = relay.var("grid", relay.ty.TensorType(grid_shape, dtype))
if len(data_shape) == 4:
layout = "NCHW"
batch, channel, _, _ = data_shape
_, _, out_height, out_width = grid_shape
tensor_type = relay.TensorType((batch, channel, out_height, out_width), dtype)
else:
layout = "NCDHW"
batch, channel, _, _, _ = data_shape
_, _, out_depth, out_height, out_width = grid_shape
tensor_type = relay.TensorType(
(batch, channel, out_depth, out_height, out_width), dtype
)
y = relay.image.grid_sample(
data,
grid,
method=method,
layout=layout,
padding_mode=padding_mode,
align_corners=align_corners,
)
yy = run_infer_type(y)
assert yy.checked_type == tensor_type
func = relay.Function([data, grid], y)
data_np = np.random.uniform(size=data_shape).astype(dtype)
grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
ref_res = tvm.topi.testing.grid_sample_python(
data_np, grid_np, method, layout, padding_mode, align_corners
)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data_np, grid_np
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5, atol=1e-5)
methods = ["nearest", "bilinear", "bicubic"]
padding_modes = ["zeros", "border", "reflection"]
align_corners = [True, False]
data_2D_shape = (4, 4, 8, 8)
grid_2D_shape = (4, 2, 16, 16)
data_3D_shape = (4, 4, 4, 4, 4)
grid_3D_shape = (4, 3, 8, 8, 8)
for _method in methods:
for _padding in padding_modes:
for _align in align_corners:
verify_grid_sample(data_2D_shape, grid_2D_shape, _method, _padding, _align)
if _method != "bicubic":
verify_grid_sample(data_3D_shape, grid_3D_shape, _method, _padding, _align)
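# grid_sample reads the grid as normalized coordinates in [-1, 1]; drawing
# grid_np from [-1.5, 1.5] deliberately samples out of range so each
# padding_mode ("zeros", "border", "reflection") is actually exercised.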
@tvm.testing.uses_gpu
def test_space_to_batch_nd(executor_kind):
def verify_space_to_batch_nd(dshape, block_shape, paddings):
x_data = np.random.uniform(size=dshape).astype("float32")
pad_before, pad_after = map(list, zip(*paddings))
ref_res = tvm.topi.testing.space_to_batch_nd_python(
x_data, block_shape, pad_before, pad_after
)
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.nn.space_to_batch_nd(x, block_shape, paddings)
assert "block_shape=" in z.astext()
assert "paddings=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
func = relay.Function([x], z)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data |
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)
verify_space_to_batch_nd([3, 3, 2, 1], [3], [[0, 0]])
verify_space_to_batch_nd([2, 2, 4, 1], [2, 2], [[0, 0], [2, 0]])
@tvm.testing.uses_gpu
def test_batch_to_space_nd(executor_kind):
def verify_batch_to_space_nd(dshape, block_shape, crops):
x_data = np.random.uniform(size=dshape).astype("float32")
crop_begin_list, crop_end_list = map(list, zip(*crops))
ref_res = tvm.topi.testing.batch_to_space_nd_python(
x_data, block_shape, crop_begin_list, crop_end_list
)
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.nn.batch_to_space_nd(x, block_shape, crops)
assert "block_shape=" in z.astext()
assert "crops=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
func = relay.Function([x], z)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)
verify_batch_to_space_nd([4, 1, 1, 3], [2, 2], [[0, 0], [0, 0]])
verify_batch_to_space_nd([8, 1, 3, 1], [2, 2], [[0, 0], [2, 0]])
@tvm.testing.uses_gpu
def test_all_class_non_max_suppression(executor_kind):
def verify_all_class_non_max_suppression(
boxes_np,
scores_np,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
expected_indices,
):
boxes = relay.var("boxes", relay.ty.TensorType(boxes_np.shape, "float32"))
scores = relay.var("scores", relay.ty.TensorType(scores_np.shape, "float32"))
out = relay.vision.all_class_non_max_suppression(
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
)
func = relay.Function([boxes, scores], out.astuple())
func = run_infer_type(func)
for target, dev in tvm.testing.enabled_targets():
selected_indices, num_detections = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(func)(boxes_np, scores_np)
tvm_res = selected_indices.numpy()[: num_detections.numpy()[0]]
np.testing.assert_equal(tvm_res, expected_indices)
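# Each row of selected_indices is a (batch_index, class_index, box_index)
# triplet; num_detections gives the number of valid rows, which is why the
# output is sliced before comparison above.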
boxes = np.array(
[
[
[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.4, 0.4],
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[0.5, 0.5, 1.0, 1.0],
],
[
[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.4, 0.4],
[0.5, 0.5, 0.95, 0.95],
[0.5, 0.5, 0.96, 0.96],
[0.5, 0.5, 1.0, 1.0],
],
]
).astype("float32")
scores = np.array(
[
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
]
).astype("float32")
max_output_boxes_per_class = 2
iou_threshold = 0.8
score_threshold = 0.0
expected = np.array(
[[0, 0, 4], [0, 0, 2], [0, 1, 4], [0, 1, 2], [1, 0, 4], [1, 0, 1], [1, 1, 4], [1, 1, 1]]
)
verify_all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
)
boxes = np.array(
[
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0],
]
]
).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = 3
iou_threshold = 0.5
score_threshold = 0.4
expected = np.array([[0, 0, 3], [0, 0, 0]])
verify_all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
)
if __name__ == "__main__":
tvm.testing.main()
"""Support level6 operator test cases."""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.topi.testing import searchsorted_ref
import tvm.testing
executor_kind = tvm.testing.parameter("graph", "vm")
@tvm.testing.uses_gpu
def test_sort():
def verify_sort(shape, axis, is_ascend, is_dyn=False, in_dtype="float32"):
if is_dyn:
x = relay.var("x", relay.TensorType([relay.Any()] * len(shape), in_dtype))
else:
x = relay.var("x", relay.TensorType(shape, in_dtype))
z = relay.sort(x, axis=axis, is_ascend=is_ascend)
func = relay.Function([x], z)
x_data = np.random.uniform(size=shape).astype(in_dtype)
if is_ascend:
ref_res = np.sort(x_data, axis=axis)
else:
ref_res = -np.sort(-x_data, axis=axis)
if is_dyn:
backend = "vm"
else:
backend = "graph"
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(backend, mod=mod, device=dev, target=target).evaluate()(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
for is_dyn in [False, True]:
verify_sort((2, 3, 4), axis=0, is_ascend=False, is_dyn=is_dyn)
verify_sort((1, 4, 6), axis=1, is_ascend=True, is_dyn=is_dyn)
verify_sort((3, 5, 6), axis=-1, is_ascend=False, is_dyn=is_dyn)
verify_sort((3, 2000, 6), axis=1, is_ascend=False, is_dyn=is_dyn)
verify_sort((1, 122640), axis=1, is_ascend=False, is_dyn=is_dyn)
verify_sort((1, 122640), axis=1, is_ascend=False, is_dyn=is_dyn, in_dtype="float16")
@tvm.testing.uses_gpu
def test_argsort():
def verify_argsort(shape, axis, is_ascend, dtype, is_dyn=False, in_dtype="float32"):
if is_dyn:
x = relay.var("x", relay.TensorType([relay.Any()] * len(shape), in_dtype))
else:
x = relay.var("x", relay.TensorType(shape, in_dtype))
z = relay.argsort(x, axis=axis, is_ascend=is_ascend, dtype=dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(size=shape).astype(in_dtype)
if is_ascend:
ref_res = np.argsort(x_data, axis=axis, kind="stable")
else:
ref_res = np.argsort(-x_data, axis=axis, kind="stable")
if is_dyn:
backend = "vm"
else:
backend = "graph"
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(backend, mod=mod, device=dev, target=target).evaluate()(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res.astype(dtype), rtol=1e-5)
for is_dyn in [False, True]:
for dtype in ["int32", "int64", "float32", "float64"]:
verify_argsort((2, 3, 4), axis=0, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
verify_argsort((1, 4, 6), axis=1, is_ascend=True, dtype=dtype, is_dyn=is_dyn)
dtype = "int32"
verify_argsort((3, 5, 6), axis=-1, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
verify_argsort((3, 6000, 6), axis=1, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
verify_argsort((1000, 1, 1), axis=0, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
verify_argsort((1, 122640), axis=1, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
verify_argsort(
(1, 122640), axis=1, is_ascend=False, dtype=dtype, is_dyn=is_dyn, in_dtype="float16"
)
@tvm.testing.uses_gpu
def test_topk(executor_kind):
def verify_topk(k, axis, ret_type, is_ascend, dtype, in_dtype="float32"):
shape = (20, 100)
x = relay.var("x", relay.TensorType(shape, in_dtype))
out = relay.topk(x, k, axis, ret_type, is_ascend, dtype)
if isinstance(out, relay.expr.TupleWrapper):
out = out.astuple()
func = relay.Function([x], out)
np_data = np.random.uniform(size=shape).astype(in_dtype)
if is_ascend:
np_indices = np.argsort(np_data, axis=axis, kind="stable")
else:
np_indices = np.argsort(-np_data, axis=axis, kind="stable")
kk = k if k >= 1 else shape[axis]
if axis == 0:
np_indices = np_indices[:kk, :]
np_values = np.zeros(np_indices.shape).astype(in_dtype)
for i in range(shape[1]):
np_values[:, i] = np_data[np_indices[:, i], i]
else:
np_indices = np_indices[:, :kk]
np_values = np.zeros(np_indices.shape).astype(in_dtype)
for i in range(shape[0]):
np_values[i, :] = np_data[i, np_indices[i, :]]
np_indices = np_indices.astype(dtype)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
np_data
)
if ret_type == "both":
tvm.testing.assert_allclose(op_res[0].numpy(), np_values)
tvm.testing.assert_allclose(op_res[1].numpy(), np_indices)
elif ret_type == "values":
tvm.testing.assert_allclose(op_res.numpy(), np_values)
else:
tvm.testing.assert_allclose(op_res.numpy(), np_indices)
np.random.seed(0)
for k in [0, 1, 5]:
for axis in [0, -1, 1]:
for ret_type in ["both", "values", "indices"]:
verify_topk(k, axis, ret_type, True, "int64")
verify_topk(k, axis, ret_type, False, "float32")
verify_topk(k, axis, ret_type, False, "int64", "float16")
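# k=0 is included above because relay.topk treats k < 1 as "keep everything
# along the axis" (see the kk fallback in verify_topk).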
@tvm.testing.uses_gpu
def test_searchsorted():
def verify_searchsorted(right, dtype):
shape = (8, 9, 10)
values_shape = shape[:-1] + (10,)
sorted_sequence = relay.var("sorted_sequence", relay.TensorType(shape, "float32"))
values = relay.var("sorted_sequence", relay.TensorType(values_shape, "float32"))
out = relay.searchsorted(sorted_sequence, values, right, dtype)
func = relay.Function([sorted_sequence, values], out)
sorted_sequence_np = np.sort(np.random.randn(*shape).astype("float32"), axis=-1) |
values_np = np.random.randn(*values_shape).astype("float32")
np_indices = searchsorted_ref(sorted_sequence_np, values_np, right, dtype)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
sorted_sequence_np, values_np
)
np.testing.assert_equal(op_res.numpy(), np_indices)
verify_searchsorted(False, "int32")
verify_searchsorted(True, "int64")
if __name__ == "__main__":
pytest.main([__file__])
import tvm
import numpy as np
from tvm import relay
def test_tflite_same_io_qnn_params():
data_dtype = "uint8"
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
z = relay.qnn.op.add(
lhs=x,
rhs=y,
lhs_scale=relay.const(0.00784314, "float32"),
lhs_zero_point=relay.const(127, "int32"),
rhs_scale=relay.const(0.00784314, "float32"),
rhs_zero_point=relay.const(127, "int32"),
output_scale=relay.const(0.00784314, "float32"),
output_zero_point=relay.const(127, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_datas = [
np.array((140, 153, 165, 178)).reshape((1, 4)),
np.array((25, 153, 178, 216)).reshape((1, 4)),
np.array((25, 153, 216, 165)).reshape((1, 4)),
]
y_datas = [
np.array((204, 178, 165, 140)).reshape((1, 4)),
np.array((204, 178, 191, 25)).reshape((1, 4)),
np.array((204, 178, 25, 191)).reshape((1, 4)),
]
golden_outputs = [
np.array((217, 204, 203, 191)).reshape((1, 4)),
np.array((102, 204, 242, 114)).reshape((1, 4)),
np.array((102, 204, 114, 229)).reshape((1, 4)),
]
for i in range(0, 3):
x_data = x_datas[i]
y_data = y_datas[i]
golden_output = golden_outputs[i]
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
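# qnn.add dequantizes, adds, and requantizes:
# q_out = (q_x - zp_x) * (s_x / s_out) + (q_y - zp_y) * (s_y / s_out) + zp_out.
# With identical params this collapses to integer math on zero-point-shifted
# values, e.g. (140 - 127) + (204 - 127) + 127 = 217, the first golden output.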
def test_tflite_different_io_qnn_params():
data_dtype = "uint8"
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
z = relay.qnn.op.add(
lhs=x,
rhs=y,
lhs_scale=relay.const(0.0156863, "float32"),
lhs_zero_point=relay.const(127, "int32"),
rhs_scale=relay.const(0.0117647, "flo |
at32"),
rhs_zero_point=relay.const(85, "int32"),
output_scale=relay.const(0.0235294, "float32"),
output_zero_point=relay.const(128, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_datas = [
np.array((76, 140, 153, 172)).reshape((1, 4)),
np.array((133, 140, 146, 153)).reshape((1, 4)),
np.array((76, 140, 172, 146)).reshape((1, 4)),
]
y_datas = [
np.array((136, 119, 128, 17)).reshape((1, 4)),
np.array((136, 119, 111, 94)).reshape((1, 4)),
np.array((136, 119, 17, 128)).reshape((1, 4)),
]
golden_outputs = [
np.array((120, 154, 167, 124)).reshape((1, 4)),
np.array((158, 154, 154, 150)).reshape((1, 4)),
np.array((120, 154, 124, 163)).reshape((1, 4)),
]
for i in range(0, 3):
x_data = x_datas[i]
y_data = y_datas[i]
golden_output = golden_outputs[i]
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
def test_saturation():
data_dtype = "uint8"
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
z = relay.qnn.op.add(
lhs=x,
rhs=y,
lhs_scale=relay.const(0.125, "float32"),
lhs_zero_point=relay.const(0, "int32"),
rhs_scale=relay.const(0.125, "float32"),
rhs_zero_point=relay.const(0, "int32"),
output_scale=relay.const(0.125, "float32"),
output_zero_point=relay.const(0, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
mod = relay.transform.InferType()(mod)
x_data = np.array((255, 1, 1, 0)).reshape((1, 4))
y_data = np.array((255, 255, 128, 0)).reshape((1, 4))
golden_output = np.array((255, 255, 129, 0)).reshape((1, 4))
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
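# With all scales equal and zero points at 0, the integer sums 255 + 255 and
# 1 + 255 overflow uint8 and clamp to 255, while 1 + 128 = 129 passes through.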
z = relay.qnn.op.add(
lhs=x,
rhs=y,
lhs_scale=relay.const(0.125, "float32"),
lhs_zero_point=relay.const(0, "int32"),
rhs_scale=relay.const(0.125, "float32"),
rhs_zero_point=relay.const(0, "int32"),
output_scale=relay.const(0.25, "float32"),
output_zero_point=relay.const(0, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_data = np.array((255, 1, 1, 0)).reshape((1, 4))
y_data = np.array((255, 255, 127, 0)).reshape((1, 4))
golden_output = np.array((255, 129, 65, 0)).reshape((1, 4))
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
z = relay.qnn.op.add(
lhs=x,
rhs=y,
lhs_scale=relay.const(0.5, "float32"),
lhs_zero_point=relay.const(0, "int32"),
rhs_scale=relay.const(0.25, "float32"),
rhs_zero_point=relay.const(0, "int32"),
output_scale=relay.const(0.125, "float32"),
output_zero_point=relay.const(0, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_data = np.array((255, 0, 1, 0)).reshape((1, 4))
y_data = np.array((0, 128, 64, 0)).reshape((1, 4))
golden_output = np.array((255, 255, 132, 0)).reshape((1, 4))
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
def test_ignore_channel_axis():
data_dtype = "uint8"
x = relay.var("x", shape=(4,), dtype=data_dtype)
y = relay.var("y", shape=(4,), dtype=data_dtype)
z = relay.qnn.op.add(
lhs=x,
rhs=y,
lhs_scale=relay.const(0.00784314, "float32"),
lhs_zero_point=relay.const(127, "int32"),
rhs_scale=relay.const(0.00784314, "float32"),
rhs_zero_point=relay.const(127, "int32"),
output_scale=relay.const(0.00784314, "float32"),
output_zero_point=relay.const(127, "int32"),
lhs_axis=1,
rhs_axis=1,
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
if __name__ == "__main__":
test_tflite_same_io_qnn_params()
test_tflite_different_io_qnn_params()
test_saturation()
test_ignore_channel_axis()
import tvm
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.testing.temp_op_attr import TempOpAttr
def legalize_qnn_batch_matmul(attrs, inputs, types):
return None
def make_requantize_params(input_scale, output_scale, output_zero_point, out_dtype):
config = {
"input_scale": input_scale,
"output_scale": output_scale,
"output_zero_point": output_zero_point,
"out_dtype": out_dtype,
}
return config
def make_configuration(
quantized_x,
quantized_y,
dtype,
x_shape,
y_shape,
x_zero_point,
y_zero_point,
x_scale,
y_scale,
output,
out_dtype="int32",
requantize=None,
):
config = {
"quantized_x": quantized_x,
"quantized_y": quantized_y,
"dtype": dtype,
"x_shape": x_shape,
"y_shape": y_shape,
"x_zero_point": x_zero_point,
"y_zero_point": y_zero_point,
"x_scale": x_scale,
"y_scale": y_scale,
"output": output,
"out_dtype": out_dtype,
"requantize": requantize,
}
return config
def make_int_configuration(
xzero_point_zero=True,
yzero_point_zero=True,
requantize_output=False,
per_channel=False,
batch_size=1,
):
x_shape, y_shape, output_shape = (batch_size, 4, 5), (batch_size, 3, 5), (batch_size, 4, 3)
x_zero_point = 0 if xzero_point_zero else -123
y_zero_point = 0 if yzero_point_zero else -123
in_dtype = "int8"
out_dtype = "int32" if not requantize_output else "int8"
quantized_x_np = (
np.array([1, 3, 5, 7, 9, 11, 13, 15, -19, -21, 1, 3, 5, 7, 9, 11, 13, -17, 17, -21])[
np.newaxis, np.newaxis, :
]
.repeat(batch_size, axis=1)
.astype(in_dtype)
.reshape(x_shape)
)
quantized_y_np = (
np.array([1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 1, 3, 5, 7, 9])[np.newaxis, np.newaxis, :]
.repeat(batch_size, axis=1)
.astype(in_dtype)
.reshape(y_shape)
)
x_scale = 0.5
y_scale = 0.5
output_scale = 2.0
if requantize_output:
assert xzero_point_zero is True
assert yzero_point_zero is True
output = np.array([20, 51, 20, -26, -27, -26, 20, 51, 20, -14, -10, -14])
elif xzero_point_zero is False and yzero_point_zero is False:
output = np.array(
[81960, 88360, 81960, 78400, 84540, 78400, 81960, 88360, 81960, 78984, 85164, 78984]
)
elif xzero_point_zero is True and yzero_point_zero is False:
output = np.array([3240, 3490, 3240, -320, -330, -320, 3240, 3490, 3240, 264, 294, 264])
elif xzero_point_zero is False and yzero_point_zero is True:
output = np.array([3240, 9640, 3240, 2878, 9018, 2878, 3240, 9640, 3240, 2970, 9150, 2970])
else:
output = np.array([165, 415, 165, -197, -207, -197, 165, 415, 165, -105, -75, -105])
requant_params = (
make_requantize_params(x_scale * y_scale, output_scale, -1, "int8")
if requantize_output
else None
)
output = (
output[np.newaxis, np.newaxis, :]
.repeat(batch_size, axis=1)
.astype(out_dtype)
.reshape(output_shape)
)
return make_configuration(
quantized_x=quantized_x_np,
quantized_y=quantized_y_np,
dtype=in_dtype,
x_shape=x_shape,
y_shape=y_shape,
x_zero_point=x_zero_point,
y_zero_point=y_zero_point,
x_scale=x_scale,
y_scale=y_scale,
output=output,
requantize=requant_params,
)
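# qnn.batch_matmul accumulates in int32 over zero-point-corrected operands
# with the second operand transposed:
# out[b, i, j] = sum_k (x[b, i, k] - zp_x) * (y[b, j, k] - zp_y),
# which maps the (B, 4, 5) x (B, 3, 5) inputs above to a (B, 4, 3) output.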
def qnn_batch_matmul_driver(test_configuration):
in_dtype = test_configuration["dtype"]
out_dtype = test_configuration["out_dtype"]
quantized_x_name = "qua |
ntized_x"
quantized_y_name = "quantized_y"
expected_out_dtype = test_configuration["out_dtype"]
quantized_x = relay.var(quantized_x_name, shape=test_configuration["x_shape"], dtype=in_dtype)
quantized_y = relay.var(quantized_y_name, shape=test_configuration["y_shape"], dtype=in_dtype)
mod = relay.qnn.op.batch_matmul(
quantized_x,
quantized_y,
relay.const(test_configuration["x_zero_point"], "int32"),
relay.const(test_configuration["y_zero_point"], "int32"),
relay.const(test_configuration["x_scale"], "float32"),
relay.const(test_configuration["y_scale"], "float32"),
)
if test_configuration["requantize"] is not None:
requantize_config = test_configuration["requantize"]
mod = relay.qnn.op.requantize(
mod,
input_scale=relay.const(requantize_config["input_scale"], "float32"),
input_zero_point=relay.const(0, "int32"),
output_scale=relay.const(requantize_config["output_scale"], "float32"),
output_zero_point=relay.const(requantize_config["output_zero_point"], "int32"),
out_dtype=requantize_config["out_dtype"],
)
expected_out_dtype = requantize_config["out_dtype"]
mod = relay.Function(relay.analysis.free_vars(mod), mod)
mod = tvm.IRModule.from_expr(mod)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
with tvm.transform.PassContext(opt_level=2):
graph, lib, params = relay.build(mod, "llvm", params=None)
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.set_input(quantized_x_name, test_configuration[quantized_x_name])
mod.set_input(quantized_y_name, test_configuration[quantized_y_name])
mod.set_input(**params)
mod.run()
res = mod.get_output(0).numpy()
np.testing.assert_equal(res, test_configuration["output"])
assert res.dtype == expected_out_dtype
def test_qnn_batch_matmul_xzp0_yzp0():
with TempOpAttr("qnn.batch_matmul", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
for batch_size in [1, 4, 7]:
int32_output_params = make_int_configuration(
xzero_point_zero=True, yzero_point_zero=True, batch_size=batch_size
)
qnn_batch_matmul_driver(int32_output_params)
def test_qnn_batch_matmul_xzp0():
with TempOpAttr("qnn.batch_matmul", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
for batch_size in [1, 4, 7]:
int32_output_params = make_int_configuration(
xzero_point_zero=True, yzero_point_zero=False, batch_size=batch_size
)
qnn_batch_matmul_driver(int32_output_params)
def test_qnn_batch_matmul_yzp0():
with TempOpAttr("qnn.batch_matmul", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
for batch_size in [1, 4, 7]:
int32_output_params = make_int_configuration(
xzero_point_zero=False, yzero_point_zero=True, batch_size=batch_size
)
qnn_batch_matmul_driver(int32_output_params)
def test_qnn_batch_matmul():
with TempOpAttr("qnn.batch_matmul", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
for batch_size in [1, 4, 7]:
int32_output_params = make_int_configuration(
xzero_point_zero=False, yzero_point_zero=False, batch_size=batch_size
)
qnn_batch_matmul_driver(int32_output_params)
def test_qnn_batch_matmul_with_requantized_output():
with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
for batch_size in [1, 4, 7]:
int8_requantized_output_params = make_int_configuration(
requantize_output=True, batch_size=batch_size
)
qnn_batch_matmul_driver(int8_requantized_output_params)
if __name__ == "__main__":
test_qnn_batch_matmul_xzp0_yzp0()
test_qnn_batch_matmul_xzp0()
test_qnn_batch_matmul_yzp0()
test_qnn_batch_matmul()
test_qnn_batch_matmul_with_requantized_output()
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
import tvm.topi.testing
def test_same_io_qnn_params():
data_dtype = "int32"
axis = 0
x_data = np.arange(-32, 32, 1).reshape(1, 64).astype(data_dtype)
y_data = np.arange(-64, 64, 2).reshape(1, 64).astype(data_dtype)
x_scale = relay.const((62 + 64) / (np.power(2, 32) - 1.0), "float32")
y_scale = relay.const((62 + 64) / (np.power(2, 32) - 1.0), "float32")
zero = relay.const(0, "int32")
x = relay.var("x", shape=(1, 64), dtype=data_dtype)
y = relay.var("y", shape=(1, 64), dtype=data_dtype)
z = relay.qnn.op.concatenate(
(x, y),
input_scales=(x_scale, y_scale),
input_zero_points=(zero, zero),
output_scale=y_scale,
output_zero_point=zero,
axis=axis,
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
golden_output = np.concatenate((x_data, y_data), axis=axis)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
def test_different_io_qnn_params():
data_dtype = "int32"
axis = 0
x_data = np.arange(-32, 32, 1).reshape(1, 64).astype(data_dtype)
y_data = np.arange(-64, 64, 2).reshape(1, 64).astype(data_dtype)
x_scale = relay.const((62 + 64) / (np.power(2, 32) - 1.0), "float32")
y_scale = relay.const((62 + 64) / (np.power(2, 32) - 1.0), "float32")
x_zero_point = relay.const(3, "int32")
y_zero_point = relay.const(4, "int32")
x = relay.var("x", shape=(1, 64), dtype=data_dtype)
y = relay.var("y", shape=(1, 64), dtype=data_dtype)
z = relay.qnn.op.concatenate(
(x, y),
input_scales=(x_scale, y_scale),
input_zero_points=(x_zero_point, y_zero_point),
output_scale=y_scale,
output_zero_point=relay.const(1, "int32"),
axis=axis,
)
func = relay.Function([x, y], z) |
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
golden_output = np.concatenate((x_data - 2, y_data - 3), axis=axis)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
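# The input scales equal the output scale here, so requantizing each input to
# the output params is a pure zero-point shift: x moves by 1 - 3 = -2 and y by
# 1 - 4 = -3, which is exactly the golden output above.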
def test_few_same_io_qnn_params():
data_dtype = "int32"
axis = 0
x_data = np.arange(-32, 32, 1).reshape(1, 64).astype(data_dtype)
y_data = np.arange(-64, 64, 2).reshape(1, 64).astype(data_dtype)
x_scale = relay.const((62 + 64) / (np.power(2, 32) - 1.0), "float32")
y_scale = relay.const((62 + 64) / (np.power(2, 32) - 1.0), "float32")
x_zero_point = relay.const(0, "int32")
y_zero_point = relay.const(1, "int32")
x = relay.var("x", shape=(1, 64), dtype=data_dtype)
y = relay.var("y", shape=(1, 64), dtype=data_dtype)
z = relay.qnn.op.concatenate(
(x, y),
input_scales=(x_scale, y_scale),
input_zero_points=(x_zero_point, y_zero_point),
output_scale=y_scale,
output_zero_point=relay.const(1, "int32"),
axis=axis,
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
golden_output = np.concatenate((x_data + 1, y_data), axis=axis)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
def test_same_i_qnn_params():
data_dtype = "int32"
axis = 0
x_data = np.arange(-32, 32, 1).reshape(1, 64).astype(data_dtype)
y_data = np.arange(-64, 64, 2).reshape(1, 64).astype(data_dtype)
x_scale = relay.const((62 + 64) / (np.power(2, 32) - 1.0), "float32")
y_scale = relay.const((62 + 64) / (np.power(2, 32) - 1.0), "float32")
x_zero_point = relay.const(0, "int32")
y_zero_point = relay.const(0, "int32")
x = relay.var("x", shape=(1, 64), dtype=data_dtype)
y = relay.var("y", shape=(1, 64), dtype=data_dtype)
z = relay.qnn.op.concatenate(
(x, y),
input_scales=(x_scale, y_scale),
input_zero_points=(x_zero_point, y_zero_point),
output_scale=y_scale,
output_zero_point=relay.const(1, "int32"),
axis=axis,
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
golden_output = np.concatenate((x_data + 1, y_data + 1), axis=axis)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
def test_call_input():
x_data = np.ones(shape=(64,)).astype("uint8")
x = relay.var("x", shape=(64,), dtype="uint8")
x_scale = relay.const(1, "float32")
y_scale = relay.const(1, "float32")
x_zero_point = relay.const(0, "int32")
y_zero_point = relay.const(0, "int32")
tup = relay.split(x, 2, axis=0)
z = relay.qnn.op.concatenate(
tup,
input_scales=(x_scale, y_scale),
input_zero_points=(x_zero_point, y_zero_point),
output_scale=y_scale,
output_zero_point=relay.const(0, "int32"),
axis=0,
)
func = relay.Function([x], z)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(x_data)
np.testing.assert_equal(op_res.numpy(), x_data)
if __name__ == "__main__":
test_call_input()
test_same_io_qnn_params()
test_different_io_qnn_params()
test_few_same_io_qnn_params()
test_same_i_qnn_params()
import numpy as np
import tvm
from tvm import relay