import numpy as np
import pytest
import tvm
from tvm import te, topi
from tvm.testing import assert_allclose
from tvm.topi.utils import get_const_tuple
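# check_grad builds the forward computation, runs te.gradient to obtain the
# symbolic adjoints with respect to `inputs`, and compares the result either
# against `desired_grads` or against numerical gradients from
# tvm.testing.check_numerical_grads. With assert_no_jacobian=True it also
# checks that no intermediate "jacobian" tensor survives in the lowered IR.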
def check_grad(
out, inputs, args=[], data_range=(-10, 10), desired_grads=None, assert_no_jacobian=True
):
inputs = inputs if isinstance(inputs, list) else [inputs]
def check_device(device, host="llvm"):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(host):
return
sout = te.create_schedule(out.op)
mout = tvm.build(sout, [out] + inputs + args)
out_shape = get_const_tuple(out.shape)
l, h = data_range
input_data = [
tvm.nd.array(
np.random.uniform(l, h, size=get_const_tuple(input.shape)).astype(input.dtype)
)
for input in inputs
]
arg_vals = [
tvm.nd.array(np.random.uniform(l, h, size=get_const_tuple(arg.shape)).astype(arg.dtype))
for arg in args
]
ones = topi.full_like(out, 1.0)
grads = te.gradient(out, inputs, head=ones)
grad_sched = te.create_schedule([grad.op for grad in grads])
mgrad = tvm.build(grad_sched, list(grads) + inputs + args)
if assert_no_jacobian:
lowered_ir = str(tvm.lower(grad_sched, list(grads) + inputs + args, simple_mode=True))
assert "jacobian" not in lowered_ir, lowered_ir
grad_data = [tvm.nd.empty(get_const_tuple(i.shape), g.dtype) for i, g in zip(inputs, grads)]
mgrad(*grad_data, *input_data, *arg_vals)
g_res = [g.numpy() for g in grad_data]
if desired_grads:
assert isinstance(desired_grads, list)
for actual, desired in zip(g_res, desired_grads):
assert_allclose(actual, desired, rtol=0.1, atol=1e-2)
else:
def forward(*in_data):
out_data = tvm.nd.empty(out_shape, out.dtype)
mout(out_data, *[tvm.nd.array(d) for d in list(in_data)])
return out_data.numpy().sum()
tvm.testing.check_numerical_grads(
forward, [d.numpy() for d in input_data + arg_vals], g_res
)
check_device("cpu")
def test_basic_operation():
np.random.seed(0)
shape = (10, 10)
x = te.var("x", dtype="float32")
k = te.reduce_axis((0, 10), name="k")
l = te.reduce_axis((0, 10), name="l")
A0 = te.placeholder(shape, name="A0")
A1 = te.placeholder(shape, name="A1")
zeros = np.zeros(shape)
B = te.compute(shape, lambda i, j: A0[i, j], name="B")
check_grad(B, [A0])
B = te.compute(shape, lambda i, j: A0[i, j] + A1[i, j], name="B")
check_grad(B, [A0, A1])
B = te.compute(shape, lambda i, j: A0[i, j] + A0[j, i], name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.floor(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.ceil(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.trunc(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.round(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: A0[i, j] + te.exp(A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.log(0.1 + te.abs(A0[i, j] + te.exp(A0[j, i]))), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sigmoid(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.tanh(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sqrt(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0, data_range=(0.1, 10))
B = te.compute(shape, lambda i, j: te.power(te.abs(A0[i, j]), A0[j, i]), name="B")
check_grad(B, A0, data_range=(-4, 4))
B = te.compute(shape, lambda i, j: A0[i, j] * A0[j, i], name="B")
check_grad(B, A0)
B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sum(A0[i, k] * A0[k, i] + 5, axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.max(A0[i, k] * A0[k, j] + 5, axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: A0[i, j] * (A1[j, i] + A0[j, i]), name="B")
check_grad(B, [A0, A1])
B = te.compute(
shape, lambda i, j: te.sum(A0[k, k] - A0[te.min(j + k, 9), j] * A0[i, k], axis=k), name="B"
)
check_grad(B, A0)
def fcombine(x, y):
return x * y
def fidentity(t0):
return tvm.tir.const(1, t0)
prod = te.comm_reducer(fcombine, fidentity, name="prod")
B = te.compute((10, 10), lambda i, j: prod(A0[i, k] + A0[k, i], axis=k), name="B")
check_grad(B, A0)
X = te.placeholder((10,), name="X")
A = te.compute((10,), lambda i: X[i] + X[9 - i])
B = te.compute((10,), lambda i: X[i] * X[9 - i])
Y = topi.tensordot(A, B, 1)
check_grad(Y, X)
X = te.placeholder((3, 3), name="X")
Y = topi.einsum("ii->i", (X))
check_grad(Y, X)
def test_topi():
X = te.placeholder((1, 2, 4, 4), name="X")
W = te.placeholder((5, 2, 3, 3), name="W")
W1 = te.placeholder((2, 5, 3, 3), name="W1")
W2 = te.placeholder((1,), name="W2")
R = topi.nn.conv2d(X, W, 1, 1, 1)
check_grad(R, [X, W])
R1 = topi.nn.conv2d(topi.nn.relu(R), W1, 1, 0, 1)
check_grad(R1, [X, W, W1])
R = topi.broadcast_to(W2, (5, 2, 3, 3))
check_grad(R, [W2])
R = topi.nn.conv2d(X, topi.broadcast_to(W2, (5, 2, 3, 3)), 1, 1, 1)
check_grad(R, [X, W2])
R = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "avg")
check_grad(R, X)
R = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(R, X)
X = te.placeholder((1, 2, 5, 5), name="X")
R = topi.reshape(X, (1, 32))
check_grad(R, [X])
X = te.placeholder((1, 2, 5, 5), name="X")
W = te.placeholder((2, 2, 3, 3), name="W")
S = topi.reshape(X, (1, 50))
check_grad(S, [X])
R = X + topi.nn.conv2d(X + topi.nn.conv2d(X, W, 1, 1, 1), W, 1, 1, 1)
check_grad(R, [X, W])
S = topi.nn.softmax(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.sigmoid(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.tanh(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.nn.log_softmax(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
check_grad(S, [W], [X])
X = te.placeholder((1, 2, 3, 5), name="X")
Y = te.placeholder((1, 2, 7, 5), name="Y")
S = topi.concatenate((X, Y), 2)
check_grad(S, [X, Y])
X = te.placeholder((1, 2, 6, 5), name="X")
(S, R) = topi.split(X, 2, 2)
check_grad(S, [X])
check_grad(R, [X])
R1 = topi.concatenate((S, R), 2)
check_grad(R1, [X])
R2 = topi.concatenate((R, S), 2)
check_grad(R2, [X])
X = te.placeholder((4, 5), name="X")
I = te.placeholder((100,), name="I", dtype="int32")
R = topi.take(X, topi.abs(I))
check_grad(R, [X], [I])
W = te.placeholder((5, 5), name="W")
exps = topi.exp(topi.nn.dense(X, W))
sumexps = topi.sum(exps, axis=-1, keepdims=True)
R = exps / sumexps
check_grad(R, [X, W], data_range=(-1, 1))
def test_stride_dilation():
X = te.placeholder((1, 2, 10, 10), name="X")
W = te.placeholder((2, 2, 1, 1), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
W = te.placeholder((2, 2, 2, 2), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
W = te.placeholder((2, 2, 3, 3), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.pool2d(X, [1, 1], [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [1, 1], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [1, 1], [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [2, 2], [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [2, 2], [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [3, 3], [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [3, 3], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [3, 3], [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
@pytest.mark.xfail
def test_reduction_init():
np.random.seed(0)
shape = (10, 10)
k = te.reduce_axis((0, 10), name="k")
A0 = te.placeholder(shape, name="A0")
B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k, init=0.0), name="B")
check_grad(B, A0)
if __name__ == "__main__":
test_basic_operation()
test_topi()
test_stride_dilation()
import tvm
from tvm import te
def test_lower_rfactor():
n = te.size_var("n")
m = te.size_var("m")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), "k")
B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
s = te.create_schedule(B.op)
ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
BF = s.rfactor(B, ki)
xo, xi = s[B].split(s[B].op.axis[0], factor=32)
s[B.op].bind(xo, te.thread_axis("blockIdx.x"))
s[B.op].bind(xi, te.thread_axis("threadIdx.y"))
s[B].bind(s[B].op.reduce_axis[0], te.thread_axis("threadIdx.x"))
s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
fapi = tvm.lower(s, [A, B])
def test_dependent_output_shape():
n, m, x = te.size_var("n"), te.size_var("m"), te.size_var("x")
A = te.placeholder((n, m))
B = te.compute((m, n // x), lambda i, j: A[i, j])  # line truncated in the source; body reconstructed so the output shape depends on the extra size_var x
s = te.create_schedule(B.op)
mod = tvm.build(s, [A, B, x])
def test_split_uneven_unique_likely():
a = te.placeholder(
(16, 16),
)
b = te.placeholder(
(16, 16),
)
c = te.compute((16, 16), lambda x, y: a[x, y] + b[x, y])
x, y = c.op.axis
sch = te.create_schedule(c.op)
xo, xi = sch[c].split(x, 5)
stmt = tvm.lower(sch, [a, b, c])["main"].body
assert isinstance(stmt.body.body, tvm.tir.stmt.IfThenElse)
if __name__ == "__main__":
test_lower_rfactor()
test_dependent_output_shape()
test_split_uneven_unique_likely()
import numpy as np
import tvm
import tvm.testing
from tvm import te, tir, topi
from tvm.script import tir as T
def test_unique_name_complete_block():
A = te.placeholder((16, 16), name="A")
B = te.compute((16, 16), lambda x, y: A[x, y] * 2, name="main")
C = te.compute((16, 16), lambda x, y: B[x, y] + 1, name="main")
func = te.create_prim_func([A, C])
s = tir.Schedule(func, debug_mask="all")
assert isinstance(s.get_sref(s.get_block("main")), tir.schedule.StmtSRef)
assert isinstance(s.get_sref(s.get_block("main_1")), tir.schedule.StmtSRef)
def test_unique_name_reduction_block():
k1 = te.reduce_axis((0, 16), "k1")
k2 = te.reduce_axis((0, 16), "k2")
A = te.placeholder((16, 16), name="A")
B = te.compute((16,), lambda i: te.sum(A[i, k1], axis=k1), name="sum")
C = te.compute((), lambda: te.sum(B[k2], axis=k2), name="sum")
func = te.create_prim_func([A, C])
s = tir.Schedule(func, debug_mask="all")
assert isinstance(s.get_sref(s.get_block("sum")), tir.schedule.StmtSRef)
assert isinstance(s.get_sref(s.get_block("sum_1")), tir.schedule.StmtSRef)
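# _check_workload converts a TE workload into a TIR PrimFunc via
# te.create_prim_func, checks structural equality against the hand-written
# TVMScript reference, and makes sure the result can still be scheduled.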
def _check_workload(te_workload, tir_workload):
func = te.create_prim_func(te_workload())
tvm.ir.assert_structural_equal(func, tir_workload)
s = tir.Schedule(func, debug_mask="all")
assert s
def te_matmul():
k = te.reduce_axis((0, 128), "k")
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.compute((128, 128), lambda x, y: te.sum(A[x, k] * B[y, k], axis=k), name="C")
return [A, B, C]
@T.prim_func
def tir_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i0, j0, k0 in T.grid(128, 128, 128):
with T.block():
i, j, k = T.axis.remap("SSR", [i0, j0, k0])
with T.init():
C[i, j] = 0.0
C[i, j] += A[i, k] * B[j, k]
def test_matmul():
_check_workload(te_matmul, tir_matmul)
def te_element_wise():
A = te.placeholder((128, 128), name="A")
B = te.compute((128, 128), lambda x, y: A[x, y] * 2, name="B")
C = te.compute((128, 128), lambda x, y: B[x, y] + 1, name="C")
return [A, C]
@T.prim_func
def tir_element_wise(a: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
for i0, j0 in T.grid(128, 128):
with T.block():
i, j = T.axis.remap("SS", [i0, j0])
B[i, j] = A[i, j] * 2.0
for i0, j0 in T.grid(128, 128):
with T.block():
i, j = T.axis.remap("SS", [i0, j0])
C[i, j] = B[i, j] + 1.0
def test_element_wise():
_check_workload(te_element_wise, tir_element_wise)
def te_conv2d():
batch = 16
in_channel = 16
out_channel = 32
size = 14
kernel = 3
A = te.placeholder((batch, in_channel, size, size), name="A")
W = te.placeholder((in_channel, kernel, kernel, out_channel), name="W")
Apad = te.compute(
(batch, in_channel, size + 2, size + 2),
lambda nn, cc, yy, xx: tvm.tir.if_then_else(
tvm.tir.all(yy >= 1, yy - 1 < size, xx >= 1, xx - 1 < size),
A[nn, cc, yy - 1, xx - 1],
0.0,
),
name="Apad",
)
rc = te.reduce_axis((0, in_channel), name="rc")
ry = te.reduce_axis((0, kernel), name="ry")
rx = te.reduce_axis((0, kernel), name="rx")
B = te.compute(
(batch, out_channel, size, size),
lambda nn, ff, yy, xx: te.sum(
Apad[nn, rc, yy + ry, xx + rx] * W[rc, ry, rx, ff], axis=[rc, ry, rx]
),
name="B",
)
return [A, W, B]
@T.prim_func
def tir_conv2d(a: T.handle, w: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, [16, 16, 14, 14])
W = T.match_buffer(w, [16, 3, 3, 32])
B = T.match_buffer(b, [16, 32, 14, 14])
Apad = T.alloc_buffer([16, 16, 16, 16])
for n, c, y, x in T.grid(16, 16, 16, 16):
with T.block("Apad"):
nn, cc, yy, xx = T.axis.remap("SSSS", [n, c, y, x])
Apad[nn, cc, yy, xx] = T.if_then_else(
1 <= yy and yy < 15 and 1 <= xx and xx < 15,
A[nn, cc, yy - 1, xx - 1],
0.0,
dtype="float32",
)
for n, f, y, x, kc, ky, kx in T.grid(16, 32, 14, 14, 16, 3, 3):
with T.block("B"):
nn, ff, yy, xx, rc, ry, rx = T.axis.remap("SSSSRRR", [n, f, y, x, kc, ky, kx])
with T.init():
B[nn, ff, yy, xx] = 0.0
B[nn, ff, yy, xx] += Apad[nn, rc, yy + ry, xx + rx] * W[rc, ry, rx, ff]
def test_conv2d():
_check_workload(te_conv2d, tir_conv2d)
def te_multi_output():
n = te.var("n")
m = te.var("m")
A0 = te.placeholder((m, n), name="A0")
A1 = te.placeholder((m, n), name="A1")
B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] + 2, A1[i, j] * 3), name="B")
return [A0, A1, B0, B1]
@T.prim_func
def tir_multi_output(a0: T.handle, a1: T.handle, b0: T.handle, b1: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
m = T.var("int32")
n = T.var("int32")
A0 = T.match_buffer(a0, (m, n))
A1 = T.match_buffer(a1, (m, n))
B0 = T.match_buffer(b0, (m, n))
B1 = T.match_buffer(b1, (m, n))
for i0, i1 in T.grid(m, n):
with T.block("B.v0"):
i, j = T.axis.remap("SS", [i0, i1])
B0[i, j] = A0[i, j] + 2.0
with T.block("B.v1"):
i, j = T.axis.remap("SS", [i0, i1])
B1[i, j] = A1[i, j] * 3.0
def test_multi_output():
_check_workload(te_multi_output, tir_multi_output)
def te_extern():
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.extern(
(128, 128),
[A, B],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cblas.matmul", ins[0], ins[1], outs[0], 0, 0
),
name="C",
)
return [A, B, C]
@T.prim_func
def tir_extern(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
off1 = te.var("elem_offset")
off2 = te.var("elem_offset_1")
off3 = te.var("elem_offset_2")
A = T.match_buffer(a, (128, 128), elem_offset=off1)
B = T.match_buffer(b, (128, 128), elem_offset=off2)
C = T.match_buffer(c, (128, 128), elem_offset=off3)
with T.block("C"):
T.reads([A[0:128, 0:128], B[0:128, 0:128]])
T.writes([C[0:128, 0:128]])
T.evaluate(
T.tvm_call_packed(
"tvm.contrib.cblas.matmul",
T.tvm_stack_make_array(
A.data,
T.tvm_stack_make_shape(128, 128, dtype="handle"),
0,
2,
0.0,
off1,
dtype="handle",
),
T.tvm_stack_make_array(
B.data,
T.tvm_stack_make_shape(128, 128, dtype="handle"),
0,
2,
0.0,
off2,
dtype="handle",
),
T.tvm_stack_make_array(
C.data,
T.tvm_stack_make_shape(128, 128, dtype="handle"),
0,
2,
0.0,
off3,
dtype="handle",
),
0,
0,
dtype="int32",
)
)
def test_extern():
_check_workload(te_extern, tir_extern)
def te_reordered_matmul():
k = te.reduce_axis((0, 128), "k")
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.compute((128, 128), lambda x, y: te.sum(A[x, k] * B[y, k], axis=k), name="C")
return [C, A, B]
@T.prim_func
def tir_reordered_matmul(c: T.handle, a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i0, j0, k0 in T.grid(128, 128, 128):
with T.block():
i, j, k = T.axis.remap("SSR", [i0, j0, k0])
with T.init():
C[i, j] = 0.0
C[i, j] += A[i, k] * B[j, k]
def test_arg_order():
_check_workload(te_reordered_matmul, tir_reordered_matmul)
def te_scan():
m = te.var("m")
n = te.var("n")
X = te.placeholder((m, n), name="X")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: X[0, i])
s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + X[t, i])
s_scan = tvm.te.scan(s_init, s_update, s_state, inputs=[X])
return [X, s_scan]
def test_error_reporting():
try:
te.create_prim_func(te_scan())
assert False
except TypeError as e:
error_message = str(e)
assert error_message.find("Unsupported Operation: ScanOp.") != -1
return
assert False
def test_constant():
M = 11
A = te.placeholder((M,), name="A")
B = te.compute(tuple(), lambda: 2, name="B")
C = te.compute(
(M,), lambda x: A[x] + tvm.tir.expr.ProducerLoad(B, []), name="C", tag="broadcast"
)
func = te.create_prim_func([C, A])
func = tvm.build(func)
a_np = np.random.uniform(size=(M,)).astype(A.dtype)
c = tvm.nd.array(np.zeros(M, dtype=C.dtype))
x = func(c, tvm.nd.array(a_np))
tvm.testing.assert_allclose(a_np + 2, c.numpy())
def test_data_dependent_access():
A = te.placeholder((10,), name="A")
B = te.placeholder((10,), name="B", dtype="int32")
C = te.compute((10,), lambda i: A[B[i]])
func = te.create_prim_func([C, A, B])
func = tvm.build(func)
a_np = np.random.uniform(size=(10,)).astype(A.dtype)
b_np = np.arange(10, dtype=B.dtype)
c = tvm.nd.array(np.zeros(10, dtype=C.dtype))
func(c, tvm.nd.array(a_np), tvm.nd.array(b_np))
tvm.testing.assert_allclose(a_np[b_np], c.numpy())
def test_select_simplify():
placeholder = te.placeholder([1, 128, 10, 10, 4], dtype="float32")
tensor = topi.nn.adaptive_pool(placeholder, [1, 1], "avg", "NCHW4c")
result = te.create_prim_func([placeholder, tensor])
script_func = result.script()
assert script_func.find("Select") == -1
assert script_func.find("Var") == -1
def test_tensor_attr():
k = te.reduce_axis((0, 128), "k")
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.compute(
(128, 128),
lambda x, y: te.sum(A[x, k] * B[y, k], axis=k),
name="C",
attrs={"layout_free_placeholders": [B]},
)
func = te.create_prim_func([A, B, C])
rt_func = tvm.script.from_source(func.script())
tvm.ir.assert_structural_equal(func, rt_func)
@T.prim_func
def expected_layout_attr(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
D: T.Buffer[(128, 128), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
C = T.alloc_buffer([128, 128], dtype="float32")
for i0, i1, i2 in T.grid(128, 128, 128):
with T.block("C"):
x, y, k = T.axis.remap("SSR", [i0, i1, i2])
with T.init():
C[x, y] = T.float32(0)
C[x, y] = C[x, y] + A[x, k] * B[y, k]
for i0, i1 in T.grid(128, 128):
with T.block("D"):
x, y = T.axis.remap("SS", [i0, i1])
D[x, y] = C[x, y] + T.float32(1)
def test_tensor_layout_attr():
k = te.reduce_axis((0, 128), "k")
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.compute(
(128, 128),
lambda x, y: te.sum(A[x, k] * B[y, k], axis=k),
name="C",
attrs={"layout_free_placeholders": [B]},
)
D = te.compute(
(128, 128),
lambda x, y: C[x, y] + 1,
name="D",
attrs={"layou |
t_free_placeholders": [C]},
)
func = te.create_prim_func([A, B, D])
tvm.ir.assert_structural_equal(func, expected_layout_attr)
def te_argmax_idx_val():
def f_combine(x, y):
lhs = tvm.tir.Select((x[1] >= y[1]), x[0], y[0])
rhs = tvm.tir.Select((x[1] >= y[1]), x[1], y[1])
return lhs, rhs
def f_identity(dtype0: tvm.DataType, dtype1: tvm.DataType):
return tvm.tir.const(-1, dtype0), tvm.te.min_value(dtype1)
argmax = te.comm_reducer(f_combine, f_identity, name="argmax")
m = te.var("m")
n = te.var("n")
idx = te.placeholder((m, n), name="idx", dtype="int32")
val = te.placeholder((m, n), name="val", dtype="float32")
k = te.reduce_axis((0, n), "k")
max_idx, max_val = te.compute(
(m,), lambda i: argmax((idx[i, k], val[i, k]), axis=k), name="argmax"
)
return [idx, val, max_idx, max_val]
@T.prim_func
def tir_argmax_idx_val(
var_idx: T.handle, var_val: T.handle, var_argmax_v0: T.handle, var_argmax_v1: T.handle
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
m = T.var("int32")
n = T.var("int32")
idx = T.match_buffer(var_idx, [m, n], dtype="int32")
val = T.match_buffer(var_val, [m, n], dtype="float32")
argmax_v0 = T.match_buffer(var_argmax_v0, [m], dtype="int32")
argmax_v1 = T.match_buffer(var_argmax_v1, [m], dtype="float32")
for i0, i1 in T.grid(m, n):
with T.block("argmax"):
i, k = T.axis.remap("SR", [i0, i1])
T.reads(val[i, k], idx[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = T.int32(-1)
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
def te_argmax_val_idx():
def f_combine(x, y):
lhs = tvm.tir.Select((x[0] >= y[0]), x[0], y[0])
rhs = tvm.tir.Select((x[0] >= y[0]), x[1], y[1])
return lhs, rhs
def f_identity(dtype0: tvm.DataType, dtype1: tvm.DataType):
return tvm.te.min_value(dtype0), tvm.tir.const(-1, dtype1)
argmax = te.comm_reducer(f_combine, f_identity, name="argmax")
m = te.var("m")
n = te.var("n")
val = te.placeholder((m, n), name="val", dtype="float32")
idx = te.placeholder((m, n), name="idx", dtype="int32")
k = te.reduce_axis((0, n), "k")
max_val, max_idx = te.compute(
(m,), lambda i: argmax((val[i, k], idx[i, k]), axis=k), name="argmax"
)
return [val, idx, max_val, max_idx]
@T.prim_func
def tir_argmax_val_idx(
var_val: T.handle, var_idx: T.handle, var_argmax_v0: T.handle, var_argmax_v1: T.handle
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
m = T.var("int32")
n = T.var("int32")
val = T.match_buffer(var_val, [m, n], dtype="float32")
idx = T.match_buffer(var_idx, [m, n], dtype="int32")
argmax_v0 = T.match_buffer(var_argmax_v0, [m], dtype="float32")
argmax_v1 = T.match_buffer(var_argmax_v1, [m], dtype="int32")
for i0, i1 in T.grid(m, n):
with T.block("argmax"):
i, k = T.axis.remap("SR", [i0, i1])
T.reads(val[i, k], idx[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = T.min_value("float32")
argmax_v1[i] = T.int32(-1)
v_argmax_v0: T.float32 = T.Select(argmax_v0[i] >= val[i, k], argmax_v0[i], val[i, k])
v_argmax_v1: T.int32 = T.Select(argmax_v0[i] >= val[i, k], argmax_v1[i], idx[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
def test_argmax_idx_val():
_check_workload(te_argmax_idx_val, tir_argmax_idx_val)
def test_argmax_val_idx():
_check_workload(te_argmax_val_idx, tir_argmax_val_idx)
def test_int64_indices():
n = te.var("n", "int64")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1, name="B")
prim_func = te.create_prim_func([A, B])
loop = prim_func.body.block.body
assert loop.loop_var.dtype == "int64"
assert loop.min.dtype == "int64"
assert loop.extent.dtype == "int64"
def test_zero_dim_add():
def te_func():
a = te.placeholder((), name="a", dtype="int32")
b = te.placeholder((), name="b", dtype="int32")
c = te.compute(a.shape, lambda *i: a(*i) + b(*i), name="c")
return [a, b, c]
@T.prim_func
def expected(
a: T.Buffer[(), "int32"],
b: T.Buffer[(), "int32"],
c: T.Buffer[(), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
with T.block("c"):
vi = T.axis.spatial(1, 0)
T.reads(a[()], b[()])
T.writes(c[()])
c[()] = a[()] + b[()]
_check_workload(te_func, expected)
if __name__ == "__main__":
test_unique_name_complete_block()
test_unique_name_reduction_block()
test_matmul()
test_element_wise()
test_conv2d()
test_multi_output()
test_extern()
test_arg_order()
test_error_reporting()
test_constant()
test_select_simplify()
test_tensor_attr()
test_tensor_layout_attr()
test_argmax_idx_val()
test_argmax_val_idx()
test_int64_indices()
test_zero_dim_add()
"""Test group effect"""
import tvm
from tvm import te
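# These tests exercise te.create_schedule group semantics: stages gathered by
# s.create_group share a common group, and compute_at on a group attaches the
# whole group to the consumer stage.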
def test_scan_group():
m = te.size_var("m")
n = te.size_var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: x[0, i])
s_update1 = te.compute((m, n), lambda t, i: s_state[t - 1, i] + x[t, i])
s_update2 = te.compute((m, n), lambda t, i: s_update1[t, i] + 1)
s_update3 = te.compute((m, n), lambda t, i: s_update2[t, i] + 1)
res = tvm.te.scan(s_init, s_update3, s_state, inputs=x)
s = te.create_schedule(res.op)
assert s[s_update1].group is not None
assert s[s_update2].group == s[s_update1].group
s[s_update1].compute_at(s[s_update2], s_update2.op.axis[1])
g2 = s.create_group(outputs=s_update2, inputs=[s_state, x])
assert g2.group is not None
assert g2.group == s[s_update3].group
assert s[s_update2].group == g2
assert s[s_update1].group == g2
g2.compute_at(s[s_update3], s_update3.op.axis[1])
assert g2.attach_stage == s[s_update3]
try:
s[s_update2].compute_at(s[s_init], s_init.op.axis[0])
assert False
except tvm.error.TVMError:
pass
def test_compute_group():
m = te.size_var("m")
n = te.size_var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1")
x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2")
s = te.create_schedule(x2.op)
g = s.create_group(outputs=x1, inputs=x, include_inputs=True)
assert s[x1].group == g
assert s[x].group == g
g.compute_at(s[x2], x2.op.axis[1])
assert g.attach_stage == s[x2]
assert g.num_child_stages == 2
def test_nest_group():
m = te.size_var("m")
n = te.size_var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1")
x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2")
s = te.create_schedule(x2.op)
g1 = s.create_group(outputs=x1, inputs=x)
g2 = s.create_group(outputs=x1, inputs=x, include_inputs=True)
assert set(s.groups) == set([g1, g2])
assert s[x].group == g2
assert s[x1].group == g1
assert g1.group == g2
assert g2.num_child_stages == 2
assert g1.num_child_stages == 1
if __name__ == "__main__":
test_nest_group()
test_compute_group()
test_scan_group()
import tvm, inspect, sys, traceback, numpy, pytest, types, os
from tvm import te
from tvm.contrib import utils
from tvm.te.hybrid import script
from tvm.te.hybrid.runtime import HYBRID_GLOBALS
import tvm.testing
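# run_and_check is the shared harness for the hybrid-script tests: it lowers a
# hybrid function (or a caller-supplied schedule), builds and runs it on the
# target device, compares the outputs with a reference obtained by calling the
# same function on numpy data, and returns the saved HybridModule with its
# input/output tensors for round-trip checks.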
@pytest.mark.skip
def run_and_check(func, args, var_dict={}, target="llvm", sch=None, outs=None):
def tvm_val_2_py_val(val):
val = tvm.tir.stmt_functor.substitute(val, var_dict)
val = tvm.arith.Analyzer().simplify(val)
assert isinstance(val, (tvm.tir.IntImm,))
return val.value
dev = tvm.device(target, 0)
op = None
if sch is None:
outs = func(*tuple(tvm.runtime.convert(i) if isinstance(i, list) else i for i in args))
op = outs[0].op if isinstance(outs, list) else outs.op
sch = te.create_schedule(op)
else:
assert outs is not None
assert isinstance(outs, list)
op = outs[0].op
emu_args = []
nd_args = []
for i in args:
if isinstance(i, te.tensor.Tensor):
shape = [tvm_val_2_py_val(j) for j in i.shape]
emu_args.append(numpy.random.randn(*shape).astype(i.dtype))
nd_args.append(tvm.nd.array(emu_args[-1], dev))
elif isinstance(i, tvm.tir.Var):
emu_args.append(tvm_val_2_py_val(i))
nd_args.append(emu_args[-1])
else:
assert isinstance(i, list)
emu_args.append(numpy.array(i))
compile_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))] + (
outs if isinstance(outs, list) else [outs]
)
module = tvm.build(sch, compile_args, target=target)
assert module
out_tensors = []
for i in range(op.num_outputs):
output = op.output(i)
shape = [tvm_val_2_py_val(j) for j in output.shape]
nd_args.append(tvm.nd.array(numpy.zeros(shape).astype(output.dtype), dev))
out_tensors.append(nd_args[-1])
ref_data = func(*emu_args)
if isinstance(ref_data, numpy.ndarray):
ref_data = [ref_data]
module(*nd_args)
for nd, np in zip(out_tensors, ref_data):
tvm.testing.assert_allclose(nd.numpy(), np, rtol=1e-5, atol=1e-5)
module_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))]
module_outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
h_module = te.hybrid.build(sch, module_args, module_outs)
return h_module, module_args, module_outs
@script
def outer_product(n, m, a, b):
"""This is a simple outer product.
Actually this function is not required to be documented.
I write this docstring to test skipping docstring functionality.
"""
c = output_tensor((n, m), a.dtype)
for i in range(n):
for j in range(m):
assert i < n and j < m, "index out of range!"
c[i, j] = a[i] * b[j]
return c
@tvm.testing.skip_if_wheel_test
def test_outer_product():
n = te.size_var("n")
m = te.size_var("m")
a = te.placeholder((n,), name="a")
b = te.placeholder((m,), name="b")
try:
c = outer_product(n, m, a, b)
ir = c.op.body
except IOError as err:
assert sys.version_info[0] == 2 and str(err) == "could not get source code"
return
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i"
assert ir.min.value == 0
assert ir.extent.name == "n"
ibody = ir.body
assert isinstance(ibody, tvm.tir.For)
assert ibody.loop_var.name == "j"
assert ibody.min.value == 0
assert ibody.extent.name == "m"
jblock = ibody.body
assert isinstance(jblock, tvm.tir.SeqStmt)
jbody = jblock[0]
assert isinstance(jbody, tvm.tir.AssertStmt)
assert isinstance(jbody.message, tvm.tir.StringImm)
assert jbody.message.value == "index out of range!"
jbody = jblock[1]
assert isinstance(jbody, tvm.tir.ProducerStore)
assert jbody.producer.op.name == "c"
assert len(jbody.indices) == 2
assert jbody.indices[0].name == "i"
assert jbody.indices[1].name == "j"
assert isinstance(jbody.value, tvm.tir.Mul)
mul = jbody.value
assert isinstance(mul.a, tvm.tir.ProducerLoad)
assert mul.a.producer.name == "a"
assert mul.b.producer.name == "b"
func, ins, outs = run_and_check(outer_product, [n, m, a, b], {n: 99, m: 101})
temp = utils.tempdir()
path = temp.relpath("%s.py" % func.name)
func.save(path)
func_ = te.hybrid.HybridModule()
func_.load(path)
run_and_check(func_, ins, {n: 99, m: 101}, outs=outs)
for key, _ in HYBRID_GLOBALS.items():
assert key not in globals().keys()
assert key not in outer_product.__globals__.keys()
@tvm.testing.skip_if_wheel_test
def test_fanout():
@script
def fanout(n, a):
three = 3.0
b = output_tensor((a.shape[0] - 3,), a.dtype)
for i in range(a.shape[0] - 3):
sigma = 0.0
for j in range(3):
sigma += a[i + j]
sigma = sigma / three
b[i] = sigma
return b
n = te.size_var("n")
a = te.placeholder((n,), "float32", name="a")
try:
b = fanout(n, a)
ir = b.op.body
except IOError as err:
assert sys.version_info[0] == 2 and str(err) == "could not get source code"
return
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i"
assert ir.min.value == 0
assert tvm.ir.structural_equal(ir.extent, n - 3)
abody = ir.body
assert isinstance(abody, tvm.tir.ProducerRealize)
assert abody.bounds[0].min.value == 0
assert abody.bounds[0].extent.value == 1
assert abody.producer.op.name == "sigma"
rbody = abody.body
assert isinstance(rbody[0], tvm.tir.ProducerStore)
assert rbody[0].producer.op.name == "sigma"
assert len(rbody[0].indices) == 1
assert rbody[0].indices[0].value == 0
jloop = rbody[1]
assert jloop.loop_var.name == "j"
assert jloop.min.value == 0
assert jloop.extent.value == 3
jbody = jloop.body
assert isinstance(jbody, tvm.tir.ProducerStore)
assert len(jbody.indices) == 1
assert jbody.indices[0].value == 0
assert jbody.producer.op.name == "sigma"
assert isinstance(jbody.value, tvm.tir.Add)
value = jbody.value
assert isinstance(value.a, tvm.tir.ProducerLoad)
assert value.a.producer.name == "sigma"
assert len(value.a.indices) == 1
assert value.a.indices[0].value == 0
assert value.b.producer.name == "a"
assert len(value.b.indices) == 1
assert tvm.ir.structural_equal(value.b.indices[0], ir.loop_var + jloop.loop_var)
divide = rbody[2]
assert isinstance(divide, tvm.tir.ProducerStore)
assert len(divide.indices) == 1
assert divide.indices[0].value == 0
value = divide.value
assert isinstance(value, tvm.tir.Mul)
assert value.a.producer.name == "sigma"
assert len(value.a.indices) == 1
assert value.a.indices[0].value == 0
assert abs(value.b.value - (1 / 3.0)) < 1e-5
write = rbody[3]
assert isinstance(write, tvm.tir.ProducerStore)
assert write.producer.op.name == "b"
assert write.value.producer.name == "sigma"
assert len(write.value.indices) == 1
assert write.value.indices[0].value == 0
func, ins, outs = run_and_check(fanout, [n, a], {n: 10})
run_and_check(func, ins, {n: 10}, outs=outs)
def test_looptype():
@script
def looptype(a, b, c):
d = output_tensor((16,), "int32")
e = output_tensor((16,), "int32")
f = output_tensor((16,), "int32")
for i in parallel(16):
d[i] = a[i]
for j in vectorize(16):
e[j] = b[j]
for k in unroll(16):
f[k] = c[k]
return d, e, f
a = te.placeholder((16,), name="a", dtype="int32")
b = te.placeholder((16,), name="b", dtype="int32")
c = te.placeholder((16,), name="c", dtype="int32")
try:
d, e, f = looptype(a, b, c)
ir = d.op.body
except:
return
iloop = ir[0]
jloop = ir[1]
kloop = ir[2]
assert iloop.kind == tvm.tir.ForKind.PARALLEL
assert jloop.kind == tvm.tir.ForKind.VECTORIZED
assert kloop.kind == tvm.tir.ForKind.UNROLLED
func, ins, outs = run_and_check(looptype, [a, b, c])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_if():
@script
def if_then_else(a):
b = output_tensor((10,), "int32")
c = output_tensor((10,), "int32")
for i in range(10):
if i % 2 == 0:
c[i] = a[i]
else:
c[i] = b[i]
for i in unroll(10):
b[i] = -1 if i % 2 == 0 else 1
return b, c
a = te.placeholder((10,), dtype="int32", name="a")
func, ins, outs = run_and_check(if_then_else, [a])
run_and_check(func, ins, outs=outs)
@script
def if_triple_condition(a):
b = output_tensor((10,), "int32")
for i in range(10):
if 0 <= i < 5:
b[i] = a[i]
else:
b[i] = a[i] + 1
return b
func, ins, outs = run_and_check(if_triple_condition, [a])
run_and_check(func, ins, outs=outs)
@script
def if_and(a):
b = output_tensor((10,), "int32")
for i in range(10):
if i >= 0 and i < 5:
b[i] = a[i]
else:
b[i] = a[i] + 1
return b
func, ins, outs = run_and_check(if_and, [a])
run_and_check(func, ins, outs=outs)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_bind():
@script
def vec_add(a, b):
c = output_tensor((1000,), "float32")
for tx in bind("threadIdx.x", 1000):
c[tx] = a[tx] + b[tx]
return c
a = te.placeholder((1000,), dtype="float32", name="a")
b = te.placeholder((1000,), dtype="float32", name="b")
func, ins, outs = run_and_check(vec_add, [a, b], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@script
def raw(a, b):
c = output_tensor((1000,), "float32")
for i in range(1000):
c[i] = a[i] + b[i]
return c
c = raw(a, b)
sch = te.create_schedule(c.op)
x = te.thread_axis("threadIdx.x")
sch[c].bind(c.op.axis[0], x)
func, ins, outs = run_and_check(raw, [a, b], sch=sch, outs=[c], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@te.hybrid.script
def foo(a):
c = output_tensor((a.shape[0],), a.dtype)
total = allocate((1,), a.dtype, "local")
len_i = a.shape[0]
len_j = a.shape[1]
for i in bind("threadIdx.x", len_i):
total[0] = 0.0
for k in const_range(len_j):
total[0] += a[i, k]
c[i] = total[0]
return c
a = te.placeholder((8, 4), "float32")
c = foo(a)
s = te.create_schedule(c.op)
ir = tvm.lower(s, [a, c])
func, ins, outs = run_and_check(foo, [a], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@te.hybrid.script
def max_threads(a):
b = output_tensor(a.shape, a.dtype)
n = a.shape[0]
m = max_num_threads(True)
for i in bind("threadIdx.x", m):
for j in bind("blockIdx.x", ceil_div(n, m)):
if i * m + j < n:
b[i * m + j] = a[i * m + j] + a[i * m + j]
return b
a = te.placeholder((10000,), "float32")
with tvm.target.Target("cuda"):
func, ins, outs = run_and_check(max_threads, [a], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@tvm.testing.skip_if_wheel_test
def test_math_intrin():
@script
def intrin_real(a):
b = output_tensor((8,), "float32")
b[0] = sqrt(a[0])
b[1] = log(a[1])
b[2] = exp(a[2])
b[3] = sigmoid(a[3])
b[4] = power(a[4], a[5])
b[5] = tanh(a[5])
b[6] = min(a[4], a[5])
b[7] = max(a[5], a[6])
return b
a8 = te.placeholder((8,), dtype="float32", name="a")
b8 = intrin_real(a8)
sch = te.create_schedule(b8.op)
func = tvm.build(sch, [a8, b8])
assert func
a = numpy.arange(2, 10).astype("float32")
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(numpy.zeros((8,), dtype="float32"))
b = intrin_real(a)
func(tvm_a, tvm_b)
tvm.testing.assert_allclose(b, tvm_b.numpy(), rtol=1e-5)
@script
def intrin_int(a):
b = output_tensor((1,), "int32")
b[0] = popcount(a[0])
return b
a1 = te.placeholder((1,), dtype="int32")
b1 = intrin_int(a1)
sch = te.create_schedule(b1.op)
func = tvm.build(sch, [a1, b1])
assert func
a = numpy.array([114514]).astype("int32")
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(numpy.array([0]).astype("int32"))
b = intrin_int(a)
func(tvm_a, tvm_b)
assert tvm_b.numpy()[0] == b[0]
@tvm.testing.skip_if_wheel_test
def test_non_zero():
@te.hybrid.script
def blur(a):
b = output_tensor((30, 30), "float32")
for i in range(2, 32):
for j in range(2, 32):
s = 0.0
for di in range(3):
for dj in range(3):
s += a[i - di, j - dj]
b[i - 2, j - 2] = s / 9.0
return b
a = te.placeholder((32, 32), "float32", "a")
func, ins, outs = run_and_check(blur, [a])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def triangle(a, b):
c = output_tensor((10, 10), dtype="float32")
for i in range(10):
for j in range(i, 10):
c[i, j] = a[i] * b[j]
return c
a = te.placeholder((10,), dtype="float32", name="a")
b = te.placeholder((10,), dtype="float32", name="b")
func, ins, outs = run_and_check(triangle, [a, b])
run_and_check(func, ins, outs=outs)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_allocate():
@te.hybrid.script
def blur2d(a):
b = output_tensor((30, 30), "float32")
for i in range(30):
ha = allocate((3, 30), "float32")
for j in range(3):
for k in range(30):
ha[j, k] = a[i + j, k] + a[i + j, k + 1] + a[i + j, k + 2]
for j in range(30):
b[i, j] = (ha[0, j] + ha[1, j] + ha[2, j]) / 9.0
return b
a = te.placeholder((32, 32), "float32", "a")
b = blur2d(a)
sch = te.create_schedule(b.op)
func, ins, outs = run_and_check(blur2d, [a])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def share_vec_add(a, b):
c = output_tensor((256,), "float32")
shared = allocate((256,), "float32", "shared")
for i in bind("threadIdx.x", 256):
shared[i] = a[i]
local = allocate((256,), "float32", "local")
for i in bind("threadIdx.x", 256):
local[i] = b[i]
for i in bind("threadIdx.x", 256):
c[i] = shared[i] + local[i]
return c
a = te.placeholder((256,), dtype="float32", name="a")
b = te.placeholder((256,), dtype="float32", name="b")
c = share_vec_add(a, b)
func, ins, outs = run_and_check(share_vec_add, [a, b], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@tvm.testing.skip_if_wheel_test
def test_upstream():
@te.hybrid.script
def upstream(a):
b = output_tensor((20,), "float32")
for i in range(20):
b[i] = a[i] * i
return b
a = te.placeholder((20,), "float32")
b = te.placeholder((20,), "float32")
c = te.compute((20,), lambda x: a[x] + b[x])
d = upstream(c)
sch = te.create_schedule([c.op, d.op])
ir = tvm.lower(sch, [a, b, d])
func = tvm.build(sch, [a, b, d])
assert func
a = numpy.random.randn(20).astype("float32")
b = numpy.random.randn(20).astype("float32")
ref = numpy.zeros((20,), "float32")
for i in range(20):
ref[i] = (a[i] + b[i]) * i
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(b)
tvm_d = tvm.nd.array(numpy.zeros((20,)).astype("float32"))
func(tvm_a, tvm_b, tvm_d)
tvm.testing.assert_allclose(tvm_d.numpy(), ref, 1e-5, 1e-5)
@tvm.testing.skip_if_wheel_test
def test_downstream():
@te.hybrid.script
def downstream(a):
b = output_tensor((20,), "float32")
for i in range(20):
b[i] = a[i] * i
return b
a = te.placeholder((20,), "float32")
b = downstream(a)
c = te.compute((20,), lambda x: b[x] + 1.0)
sch = te.create_schedule(c.op)
module = tvm.build(sch, [a, c])
assert module
a = numpy.random.randn(20).astype("float32")
ref = numpy.zeros((20,)).astype("float32")
for i in range(20):
ref[i] = (a[i] * i) + 1.0
tvm_a = tvm.nd.array(a)
tvm_c = tvm.nd.array(numpy.zeros((20,)).astype("float32"))
module(tvm_a, tvm_c)
tvm.testing.assert_allclose(tvm_c.numpy(), ref, 1e-5, 1e-5)
@tvm.testing.skip_if_wheel_test
def test_const_param():
@te.hybrid.script
def add_something(a, b):
c = output_tensor((11,), "int32")
for i in range(11):
c[i] = a[i] + b
return c
a = te.placeholder((11,), dtype="int32", name="a")
b = tvm.tir.const(11, "int32")
c = add_something(a, b)
sch = te.create_schedule(c.op)
module = tvm.build(sch, [a, c], "llvm")
assert module
np_a = numpy.arange(11).astype("int32")
np_b = 11
np_c = numpy.zeros((11,)).astype("int32")
nd_a = tvm.nd.array(np_a)
nd_c = tvm.nd.array(numpy.zeros((11,)).astype("int32"))
module(nd_a, nd_c)
ref = add_something(np_a, 11)
tvm.testing.assert_allclose(nd_c.numpy(), ref, 1e-5, 1e-5)
@tvm.testing.skip_if_wheel_test
def test_value_index():
@te.hybrid.script
def kernel_a(a):
b = output_tensor((16,), "int32")
c = output_tensor((4, 4), "int32")
for i in range(16):
b[i] = a[i] + 2
c[i // 4, i % 4] = a[i] + 1  # line truncated in the source; index reconstructed from the (4, 4) shape, value assumed
return b, c
@te.hybrid.script
def kernel_b(b, a):
c = output_tensor((4, 4), "int32")
for i in range(4):
for j in range(4):
c[i, j] = a[i * 4 + j] * b[i, j]
return c
a = te.placeholder((16,), "int32")
b, c = kernel_a(a)
d = kernel_b(c, b)
sch = te.create_schedule(d.op)
module = tvm.build(sch, [a, d])
assert module
np_a = numpy.arange(16).astype("int32")
np_b, np_c = kernel_a(np_a)
ref = kernel_b(np_c, np_b)
res = tvm.nd.array(numpy.zeros((4, 4)).astype("int32"))
module(tvm.nd.array(np_a), res)
tvm.testing.assert_allclose(res.numpy(), ref)
@tvm.testing.skip_if_wheel_test
def test_func_call():
@te.hybrid.script
def foo(a, b):
for i in range(len(a)):
a[i] = i + 1.0
for i in range(len(a)):
b[i] = i + 1.0
c = outer_product(10, 10, a, b)
d = output_tensor(c.shape, c.dtype)
for i in range(10):
for j in range(10):
d[i, j] = c[i, j] + i * j
return d
a = te.placeholder((10,), name="a")
b = te.placeholder((10,), name="b")
func, ins, outs = run_and_check(foo, [a, b])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_bool():
@te.hybrid.script
def foo(a):
b = output_tensor(a.shape, a.dtype)
b[0] = 1.2
for i in range(1, a.shape[0] - 1):
if a[i] * a[i - 1] < a[i] or a[i] * a[i - 1] < a[i - 1] or i * a[i] == a[i]:
b[i] = a[i]
else:
b[i] = 0.0
return b
a = te.placeholder((10,), name="a")
func, ins, outs = run_and_check(foo, [a])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_const_range():
@te.hybrid.script
def foo(a, b):
c = output_tensor(a.shape, a.dtype)
d = output_tensor(a.shape, "int32")
for i in const_range(2):
for j in const_range(5):
c[i, j] = float32(int32(a[i, j]) + b[i, j])
for i in const_range(len(b)):
for j in const_range(len(b[0])):
d[i, j] = int32(a[i, j] + b[i, j])
return c, d
a = te.placeholder((2, 5), name="a", dtype="float32")
b = [[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]]
func, ins, outs = run_and_check(foo, [a, b])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def goo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
for i in const_range(len_b * 2):
if i < len_b:
c[i] = a[i] + b[i]
else:
c[i - len_b] = a[i - len_b] + b[i - len_b]
return c
a = te.placeholder((5,), name="a", dtype="int32")
b = [1, 2, 3, 4, 5]
c = goo(a, tvm.runtime.convert(b))
sch = te.create_schedule(c.op)
func, ins, outs = run_and_check(goo, [a, b])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def hoo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
for i in range(a.shape[0]):
for j in const_range(len(b)):
d = a[i] * b[j]
d += a[i] + b[j]
c[i] = d
return c
a = te.placeholder((5,), name="a", dtype="int32")
b = [1, 2, 3, 4, 5]
func, ins, outs = run_and_check(hoo, [a, b])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_schedule():
@script
def outer_product(a, b):
c = output_tensor((64, 64), a.dtype)
for i in range(64):
for j in range(64):
c[i, j] = a[i] * b[j]
return c
a = te.placeholder((64,), name="a", dtype="float32")
b = te.placeholder((64,), name="b", dtype="float32")
c = outer_product(a, b)
sch = te.create_schedule(c.op)
i, j = c.op.axis
io, ii = sch[c].split(i, 4)
sch[c].parallel(ii)
jo, ji = sch[c].split(j, 4)
joo, joi = sch[c].split(jo, 4)
sch[c].vectorize(ji)
sch[c].reorder(ii, io, joo, joi, ji)
ir = tvm.lower(sch, [a, b, c])["main"].body
assert isinstance(ir, tvm.tir.AttrStmt)
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i.inner"
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i.outer"
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "j.outer.outer"
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "j.outer.inner"
ir = ir.body
func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
run_and_check(func, ins, outs=outs)
sch = te.create_schedule(c.op)
sch[c].fuse(c.op.axis[0], c.op.axis[1])
ir = tvm.lower(sch, [a, b, c])["main"].body
assert isinstance(ir, tvm.tir.AttrStmt)
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i.j.fused"
func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
run_and_check(func, ins, outs=outs)
sch = te.create_schedule(c.op)
sch[c].split(c.op.axis[0], 3)
ir = tvm.lower(sch, [a, b, c], simple_mode=True)
func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_capture():
n = 8
constant_tuple = (10, n)
constant_list = [[1, 2], [3, n]]
const_value = 1
@te.hybrid.script
def add_something(a):
c = output_tensor((constant_tuple[1],), "int32")
for i in range(constant_tuple[1]):
c[i] = a[i] + constant_list[1][const_value]
return c
a = te.placeholder((n,), dtype="int32", name="a")
func, ins, outs = run_and_check(add_something, [a])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_array_inputs():
@script
def sum_array(inputs):
out = output_tensor((10,), inputs[0].dtype)
n = len(inputs)
for i in range(10):
for j in const_range(n):
out[i] += inputs[j][i]
return out
n = 5
inputs = []
for i in range(n):
inputs.append(te.placeholder((10,), name="t%s" % i, dtype="float32"))
out = sum_array(tvm.runtime.convert(inputs))
assert len(out.op.inputs) == n
sch = te.create_schedule(out.op)
mod = tvm.build(sch, inputs + [out], target="llvm")
assert mod
input_nd = []
out_ref = numpy.zeros((10,))
for _ in range(n):
arr = numpy.random.uniform(size=(10,)).astype("float32")
input_nd.append(tvm.nd.array(arr))
out_ref += arr
out_nd = tvm.nd.array(numpy.zeros((10,), "float32"))
mod(*input_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_ref)
if __name__ == "__main__":
test_outer_product()
test_fanout()
test_looptype()
test_if()
test_bind()
test_math_intrin()
test_non_zero()
test_allocate()
test_upstream()
test_downstream()
test_const_param()
test_value_index()
test_func_call()
test_bool()
test_const_range()
test_schedule()
test_capture()
test_array_inputs()
import pytest
import tvm
from tvm import te
import pickle as pkl
def test_schedule_create():
m = te.size_var("m")
n = te.size_var("n")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
B = te.placeholder((n, l), name="B")
AA = te.compute((m, l), lambda i, j: A[i, j])
T = te.compute((m, n, l), lambda i, j, k: AA(i, k) * B(j, k))
s = te.create_schedule(T.op)
s[AA].set_scope("shared")
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
s[AA].compute_at(s[T], xi1)
xo, xi = s[AA].split(AA.op.axis[0], factor=10)
s[T].reorder(xi2, xi1)
assert T.op.axis[1] in s[T].leaf_iter_vars
json_str = tvm.ir.save_json(s)
s_loaded = tvm.ir.load_json(json_str)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
def test_reorder():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
T = te.compute(m, lambda i: A[i + 1])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
order = (xi2, xi1, xo)
assert tuple(s[T].leaf_iter_vars) != order
s[T].reorder(*order)
assert tuple(s[T].leaf_iter_vars) == order
try:
s[T].reorder(xi2, xi1, xi2)
assert False
except tvm.error.TVMError:
pass
def test_split():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
T = te.compute((m,), lambda i: A[i])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
assert tuple(s[T].leaf_iter_vars) == (xo, xi)
def test_tile():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
assert tuple(s[T].leaf_iter_vars) == (xo, yo, xi, yi)
def test_fuse():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
fused = s[T].fuse(xo, yo)
assert any(isinstance(x, tvm.te.schedule.Fuse) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused, xi, yi)
def test_fuse_with_split():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
fused = s[T].fuse(xi, y)
assert any(isinstance(x, tvm.te.schedule.Fuse) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (xo, fused)
def test_fuse_with_out_of_order_axis():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
with pytest.raises(RuntimeError):
fused = s[T].fuse(xo, y)
def test_fuse_with_out_of_order_axis_with_reorder():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].reorder(y, xo, xi)
fused = s[T].fuse(y, xo)
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].reorder(y, xo, xi)
with pytest.raises(RuntimeError):
fused = s[T].fuse(y, xi)
def test_singleton():
A = te.placeholder((), name="A")
T = te.compute((), lambda: A() + 1)
s = te.create_schedule(T.op)
fused = s[T].fuse()
assert any(isinstance(x, tvm.te.schedule.Singleton) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused,)
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
def test_vectorize():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
s[T].vectorize(yi)
s[T].unroll(xi)
UNROLL = tvm.te.schedule.IterVar.Unrolled
VECTORIZE = tvm.te.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xi].iter_type == UNROLL
assert s[T].iter_var_attrs[yi].iter_type == VECTORIZE
def test_vectorize_commreduce():
V = te.placeholder((128,), name="V")
ax = te.reduce_axis((0, 128), name="ax")
O = te.compute((1,), lambda _: te.sum(V[ax], axis=[ax]))
s = te.create_schedule(O.op)
with pytest.raises(RuntimeError):
s[O].vectorize(ax)
def test_pragma():
m = 100
A = te.placeholder((m,), name="A")
T = te.compute((m,), lambda i: A[i])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].pragma(xo, "pragma1")
s[T].pragma(xi, "vectorize")
VECTORIZE = tvm.te.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xo].pragma_keys[0].value == "pragma1"
assert s[T].iter_var_attrs[xi].iter_type == VECTORIZE
def test_rfactor():
n = te.size_var("n")
k1 = te.reduce_axis((0, n), name="k1")
k2 = te.reduce_axis((0, n), name="k2")
A = te.placeholder((n, n, n), name="A")
B = te.compute((n,), lambda i: te.sum(A[i, k1, k2], axis=[k1, k2]))
s = te.create_schedule(B.op)
BF = s.rfactor(B, k1)
assert tuple(BF.shape) == (n, n)
assert set(BF.op.body[0].axis) == set([k2])
assert s[B].op.body[0].axis[0].dom.extent == n
assert len(s[B].all_iter_vars) == 2
s = te.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki)
assert BF.shape[0].value == 4
assert BF.shape[1] == n
assert BF.op.body[0].axis[0] == k2
assert BF.op.body[0].axis[1].var == ko.var
assert s[B].op.body[0].axis[0].dom.extent.value == 4
s = te.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki, 1)
assert n == BF.shape[0]
assert BF.shape[1].value == 4
assert BF.op.body[0].axis[0] == k2
assert BF.op.body[0].axis[1].var == ko.var
assert s[B].op.body[0].axis[0].dom.extent.value == 4
def test_tensor_intrin():
n = 16
x = te.placeholder((n,), name="x")
y = te.placeholder((n,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
def intrin_func(ins, outs):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
assert ins[0].shape[0].value == n
return tvm.tir.call_packed("vadd", ins[0].data, outs[0].data, ins[0].shape[0])
intrin = te.decl_tensor_intrin(z.op, intrin_func)
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert intrin.buffers[0].shape[0].value == n
m = 32
x = te.placeholder((m,), name="x")
y = te.placeholder((m,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
s = te.create_schedule(z.op)
xo, xi = s[z].split(z.op.axis[0], factor=n)
s[z].tensorize(xi, intrin)
assert s[z].iter_var_attrs[xi].tensor_intrin == intrin
assert s[z].iter_var_attrs[xi].iter_type == tvm.te.schedule.IterVar.Tensorized
def test_tensor_intrin_scalar_params():
n = te.size_var("n")
x = te.placeholder((n,), name="x")
v = te.size_var("v")
w = te.size_var("w")
z = te.compute((n,), lambda i: x[i] * v + w, name="z")
def intrin_func(ins, outs, sp):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
assert ins[0].shape[0] == n
assert sp[0] == v
assert sp[1] == w
return tvm.tir.call_packed("hw_func", ins[0].data, outs[0].data, sp[0], sp[1])
intrin = te.decl_tensor_intrin(
z.op, intrin_func, scalar_params=[v, w], default_buffer_params={"offset_factor": 1}
)
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert intrin.buffers[0].shape[0] == n
assert tuple(intrin.scalar_params) == tuple((v, w))
A = te.placeholder((10, 10), name="A")
C = te.compute((10, 10), lambda i, j: intrin(i * i, A[i, j], i + j), name="C")
s = te.create_schedule(C.op)
stmt = tvm.lower(s, [A, C])["main"].body
assert isinstance(stmt.body.body, tvm.tir.Evaluate)
assert len(stmt.body.body.value.args) == 5
assert str(stmt.body.body.value.args[3]) == "(i: int32*i)"
assert str(stmt.body.body.value.args[4]) == "(i: int32 + j: int32)"
def test_legalize_invalid_attach():
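# splitting or fusing an axis that another stage is attached to via compute_at
# must be legalized into a valid loop nest instead of failing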
A = te.compute((10, 10), lambda i, j: 1.0, name="A")
B = te.compute((10, 10), lambda i, j: A[i][j], name="B")
s = te.create_schedule([B.op])
s[A].compute_at(s[B], B.op.axis[1])
s[B].split(B.op.axis[1], 2)
stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
assert isinstance(stmt.body.body, tvm.tir.stmt.For)
s = te.create_schedule([B.op])
s[A].compute_at(s[B], B.op.axis[1])
s[B].fuse(B.op.axis[0], B.op.axis[1])
stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
assert isinstance(stmt, tvm.tir.stmt.For)
def test_compute_at():
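# compute_at of a stage at itself, or a compute_at cycle between two stages,
# must raise a RuntimeError at lower time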
def add():
shape = (16, 16)
A = tvm.te.compute(shape, lambda *i: 1.0, name="A")
B = tvm.te.compute(shape, lambda *i: 2.0, name="B")
C = tvm.te.compute(shape, lambda *i: A(*i) + B(*i), name="C")
return A, B, C
def invalid_compute_at_self():
A, B, C = add()
s = tvm.te.create_schedule(C.op)
s[C].compute_at(s[C], C.op.axis[0])
with pytest.raises(RuntimeError):
tvm.lower(s, [A, B], simple_mode=True)
def invalid_compute_at_loop():
A, B, C = add()
s = tvm.te.create_schedule(C.op)
s[A].compute_at(s[C], C.op.axis[0])
s[C].compute_at(s[A], A.op.axis[0])
with pytest.raises(RuntimeError):
tvm.lower(s, [C], simple_mode=True)
invalid_compute_at_self()
invalid_compute_at_loop()
if __name__ == "__main__":
test_singleton()
test_pragma()
test_tensor_intrin()
test_tensor_intrin_scalar_params()
test_rfactor()
test_schedule_create()
test_reorder()
test_tile()
test_split()
test_fuse()
test_fuse_with_split()
test_fuse_with_out_of_order_axis()
test_fuse_with_out_of_order_axis_with_reorder()
test_vectorize()
test_vectorize_commreduce()
test_legalize_invalid_attach()
test_compute_at()
import tvm
import tvm.testing
from tvm import te
def test_bound1():
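# A1 is computed at the outer axis of an 8-way split of A2, so bound inference
# should give A1's first axis an extent of 8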
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule([A2.op])
xo, xi = s[A2].split(s[A2].op.axis[0], 8)
s[A1].compute_at(s[A2], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[A1.op.axis[0]].extent.value == 8
def test_bound2():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
xo, yo, xi, yi = s[A2].tile(A2.op.axis[0], A2.op.axis[1], 8, 8)
_ = s.normalize()
s[A1].compute_at(s[A2], yo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[A1.op.axis[0]].extent.value == 8
assert bounds[A1.op.axis[1]].extent.value == 8
def test_bound3():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
s[A1].set_scope("shared")
xo, xi = s[A2].split(A2.op.axis[0], 32)
xi0, xi1 = s[A2].split(xi, nparts=16)
s[A2].bind(xi0, te.thread_axis("threadIdx.x"))
yo, yi = s[A2].split(A2.op.axis[1], 16)
_ = s.normalize()
s[A2].reorder(xo, xi0, yo, xi1, yi)
s[A1].compute_at(s[A2], yo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[A1.op.axis[0]].extent.value == 32
assert bounds[A1.op.axis[1]].extent.value == 16
def test_bound_split_ext_less_than_factor():
m = 8
I = te.placeholder((m,), name="I")
EF = te.compute((m,), lambda i: I[i] * 2, name="EF")
E = te.compute((m,), lambda i: EF[i] * 2, name="E")
s = te.create_schedule([E.op])
xo, xi = s[E].split(s[E].op.axis[0], factor=32)
s[EF].compute_at(s[E], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[xi].extent.value == m
def test_bound_split_ext_less_than_nparts():
m = 8
I = te.placeholder((m,), name="I")
EF = te.compute((m,), lambda i: I[i] * 2, name="EF")
E = te.compute((m,), lambda i: EF[i] * 2, name="E")
s = te.create_schedule([E.op])
xo, xi = s[E].split(s[E].op.axis[0], nparts=32)
s[EF].compute_at(s[E], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[xo].extent.value == m
def test_bound_split_divisible():
m = te.var("m")
l = te.var("l")
A = te.placeholder((8 * m, l), name="A")
B = te.compute((8 * m, l), lambda i, j: A[i, j], name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], 8)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[xo].extent == m
assert bounds[xi].extent.value == 8
def test_bound_tile_divisible():
m = te.var("m")
l = te.var("l")
shape = (8 * m, 32 * l)
A = te.placeholder(shape, name="A")
B = te.compute(shape, lambda i, j: A[i, j], name="B")
s = te.create_schedule(B.op)
xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], 8, 32)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[xo].extent == m
assert bounds[xi].extent.value == 8
assert bounds[yo].extent == l
assert bounds[yi].extent.value == 32
def test_bound_fusesplit1():
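# fuse two axes, split the fused axis by a symbolic factor, and verify the
# inferred min/extent of the producer against the expected indexdiv expressions
# by substituting small constants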
m = te.var("m")
l = te.var("l")
split1 = te.var("s")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
fused_axes = s[A2].fuse(A2.op.axis[0], A2.op.axis[1])
xo, xi = s[A2].split(fused_axes, split1)
s[A1].compute_at(s[A2], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
idxdiv = tvm.tir.indexdiv
tvm.testing.assert_prim_expr_equal(bounds[A1.op.axis[0]].min, idxdiv(xo * split1, l))
expected_extent = idxdiv((xo + 1) * split1 - 1, l) - idxdiv(xo * split1, l) + 1
for i in range(1, 6):
for j in range(1, 6):
for k in range(1, 6):
vars = tvm.runtime.convert(
{
split1: tvm.tir.const(i, "int32"),
l: tvm.tir.const(j, "int32"),
xo.var: tvm.tir.const(k, "int32"),
}
)
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[0]].extent, vars),
tvm.tir.stmt_functor.substitute(expected_extent, vars),
)
tvm.testing.assert_prim_expr_equal(bounds[A1.op.axis[1]].extent, l)
def test_bound_fusesplit2():
m = te.var("m")
l = tvm.runtime.convert(6)
split = tvm.runtime.convert(3)
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
fused_axes = s[A2].fuse(A2.op.axis[0], A2.op.axis[1])
xo, xi = s[A2].split(fused_axes, split)
s[A1].compute_at(s[A2], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
vars = tvm.runtime.convert({xo.var: tvm.tir.const(5, "int32")})
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[0]].min, vars), 2
)
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[1]].min, vars), 3
)
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[0]].extent, vars), 1
)
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[1]].extent, vars), 3
)
def test_bound_warp():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
s[A1].set_scope("warp")
xo, xi = s[A2].split(A2.op.axis[0], 32)
xi0, xi1 = s[A2].split(xi, factor=16)
tx = te.thread_axis("threadIdx.x")
s[A2].bind(xi1, tx)
s[A2].bind(xi0, te.thread_axis("threadIdx.y"))
y = s[A2].op.axis[1]
s[A1].compute_at(s[A2], y)
xo, xi = s[A1].split(s[A1].op.axis[0], factor=16)
s[A1].bind(xi, tx)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[A1.op.axis[0]].extent.value == 16
def test_bound_scan():
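# a cache_read stage attached inside a scan update at the outer split axis
# should be inferred with extent equal to the split factor (4)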
m = te.var("m")
n = te.var("n")
X = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: X[0, i])
s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + X[t, i])
s_scan = tvm.te.scan(s_init, s_update, s_state)
assert tuple(s_scan.shape) == (m, n)
s = te.create_schedule(s_scan.op)
XX = s.cache_read(X, "local", s_update)
xo, xi = s[s_update].split(s_update.op.axis[1], factor=4)
s[XX].compute_at(s[s_update], xo)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
assert bounds[XX.op.axis[1]].extent.value == 4
def test_bound_conv1d():
n = te.var("n")
A = te.compute((n + 2), lambda i: 1, name="A")
def computeB(ii):
i = ii + 1
return A[i - 1] + A[i] + A[i + 1]
B = te.compute(n, computeB, name="B")
s = te.create_schedule(B.op)
s[A].compute_at(s[B], B.op.axis[0])
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[A.op.axis[0]].extent.value == 3
def test_bound_blur():
n = tvm.runtime.convert(12)
A = te.compute((n, n), lambda i, j: 1, name="A")
def computeB(ii, jj):
i = ii + 1
j = jj + 1
return A[i][j] + A[i - 1][j] + A[i + 1][j] + A[i][j + 1] + A[i][j - 1]
B = te.compute((n - 2, n - 2), computeB, name="B")
s = te.create_schedule(B.op)
s[A].compute_at(s[B], B.op.axis[1])
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[A.op.axis[0]].extent.value == 3
assert bounds[A.op.axis[1]].extent.value == 3
def test_bound_rfactor():
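# after rfactor over the nparts=4 outer reduction, the factored stage's first
# axis should have extent 4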
n = te.var("n")
A = te.placeholder((n,), name="A")
k = te.reduce_axis((0, n))
B = te.compute((1,), lambda i: te.sum(A[k], axis=k, where=(i > 1)), name="B")
s = te.create_schedule(B.op)
kf, ki = s[B].split(k, nparts=4)
BF = s.rfactor(B, kf)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[BF.op.axis[0]].extent.value == 4
assert bounds[BF.op.axis[1]].extent.value == 1
def test_bound_group_schedule():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1")
x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2")
s = te.create_schedule(x2.op)
g = s.create_group(outputs=x1, inputs=x, include_inputs=True)
g.compute_at(s[x2], x2.op.axis[0])
assert s[x1].group == g
assert s[x].group == g
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[x.op.axis[0]].extent.value == 1
assert bounds[x.op.axis[1]].extent == n
def test_bound_nest_group():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1")
x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2")
s = te.create_schedule(x2.op)
g1 = s.create_group(outputs=x, inputs=x, include_inputs=True)
g2 = s.create_group(outputs=x1, inputs=x, include_inputs=True)
assert s[x].group == g1
assert s[x1].group == g2
g2.compute_at(s[x2], x2.op.axis[0])
g1.compute_at(s[x1], s[x1].op.axis[1])
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[x.op.axis[0]].extent.value == 1
assert bounds[x.op.axis[1]].extent.value == 1
assert bounds[x1.op.axis[0]].extent.value == 1
assert bounds[x1.op.axis[1]].extent == n
def test_bound_nest_thread():
m = te.var("m")
A = te.placeholder((m), name="A")
A1 = te.compute((m,), lambda i: A[i], name="A1")
A2 = te.compute((m,), lambda i: A1[i] + 2, name="A2")
A3 = te.compute((m,), lambda i: A2[i] + 3, name="A3")
s = te.create_schedule(A3.op)
s[A2].set_scope("shared")
s[A1].set_scope("local")
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis("threadIdx.x")
bx, tx = s[A3].split(A3.op.axis[0], factor=32)
s[A3].bind(bx, block_x)
s[A3].bind(tx, thread_x)
s[A2].compute_at(s[A3], tx)
_, xi = s[A2].split(A2.op.axis[0], nparts=1)
s[A2].bind(xi, thread_x)
s[A1].compute_at(s[A3], tx)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[A1.op.axis[0]].extent.value == 1
assert bounds[A2.op.axis[0]].extent.value == 32
assert bounds[A3.op.axis[0]].extent == m
def test_gemm_bound():
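# thread-bound, tiled GEMM with shared and local caches; checks the inferred
# extents of the AA/BB shared stages and the CC local stage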
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n, n), name="A")
B = te.placeholder((n, n), name="B")
k = te.reduce_axis((0, n), name="k")
C = te.compute((n, n), lambda ii, jj: te.sum(A[ii, k] * B[jj, k], axis=k), name="CC")
s = te.create_schedule(C.op)
xtile, ytile = 32, 32
scale = 8
num_thread = 8
block_factor = scale * num_thread
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis("threadIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_y = te.thread_axis("threadIdx.y")
CC = s.cache_write(C, "local")
AA = s.cache_read(A, "shared", [CC])
BB = s.cache_read(B, "shared", [CC])
by, yi = s[C].split(C.op.axis[0], factor=block_factor)
bx, xi = s[C].split(C.op.axis[1], factor=block_factor)
s[C].reorder(by, bx, yi, xi)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
ty, yi = s[C].split(yi, nparts=num_thread)
tx, xi = s[C].split(xi, nparts=num_thread)
s[C].reorder(ty, tx, yi, xi)
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
yo, xo = CC.op.axis
s[CC].reorder(k, yo, xo)
s[CC].compute_at(s[C], tx)
s[AA].compute_at(s[CC], k)
s[BB].compute_at(s[CC], k)
ty, xi = s[AA].split(s[AA].op.axis[0], nparts=num_thread)
tx, xi = s[AA].split(xi, nparts=num_thread)
s[AA].bind(ty, thread_y)
s[AA].bind(tx, thread_x)
ty, xi = s[BB].split(s[BB].op.axis[0], nparts=num_thread)
tx, xi = s[BB].split(xi, nparts=num_thread)
s[BB].bind(ty, thread_y)
s[BB].bind(tx, thread_x)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[BB.op.axis[0]].extent.value == 64
assert bounds[AA.op.axis[0]].extent.value == 64
assert bounds[CC.op.axis[0]].extent.value == 8
assert bounds[CC.op.axis[1]].extent.value == 8
def test_bound_tensor_compute_op():
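# bound inference through a tensor-compute op: B's first axis should be
# inferred with extent 10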
def intrin_test():
m1 = te.var("m1")
n1 = te.var("n1")
a = te.placeholder((m1, n1), name="a")
c = te.compute((1, n1), lambda i, j: a[0, j] + a[1, j] + a[2, j], name="c")
Ab = tvm.tir.decl_buffer(a.shape, name="Abuf", offset_factor=1)
Cb = tvm.tir.decl_buffer(c.shape, name="Cbuf", offset_factor=1)
def intrin_func(ins, outs):
aa = ins[0]
cc = outs[0]
def _body():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern("int32", "test", cc.access_ptr("w"), aa.access_ptr("r"))
)
return ib.get()
return _body()
return te.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, c: Cb})
test_func = intrin_test()
A = te.placeholder((20, 20), name="A")
B = te.compute(A.shape, lambda i, j: A[i, j], name="B")
C = te.compute((10, 20), lambda i: test_func(B[i:10, 0:20]), name="C")
s = te.create_schedule(C.op)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[B.op.axis[0]].extent.value == 10
def test_bound_simplification_failure():
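# these index expressions must simplify well enough that A's inferred extent
# stays at or below 2; the lowered stmt is printed when the check fails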
A = te.compute((2,), lambda j: j, "A")
def _check(B, A=A):
s = te.create_schedule(B.op)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.lower(s, [B, A], simple_mode=True)
if not bounds[A.op.axis[0]].extent.value <= 2:
print(stmt)
assert bounds[A.op.axis[0]].extent.value <= 2
tdiv = tvm.tir.truncdiv
_check(te.compute((10,), lambda i: A[tvm.te.min(3 * i, 4 * i) + tvm.te.min(-3 * i, -2 * i)]))
_check(te.compute((10,), lambda i: A[tvm.te.min(3 * i, 4 * i) + tvm.te.max(-3 * i, -4 * i)]))
_check(te.compute((10,), lambda i: A[-2 * tdiv(i, 2) - tvm.te.min(i, 0 - i)]))
_check(te.compute((10,), lambda i: A[i + (0 - i)]))
_check(te.compute((10,), lambda i: A[i]))
if __name__ == "__main__":
test_bound_nest_thread()
test_bound1()
test_bound_nest_group()
test_bound_group_schedule()
test_bound_scan()
test_bound3()
test_bound_rfactor()
test_bound_blur()
test_bound_conv1d()
test_bound2()
test_gemm_bound()
test_bound_warp()
test_bound_tensor_compute_op()
test_bound_simplification_failure()
test_bound_fusesplit1()
test_bound_fusesplit2()
test_bound_split_divisible()
test_bound_tile_divisible()
import tvm
from tvm import te
def test_bound_tile_mod():
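# bound inference with tiling plus storage_align: the checked cache-stage axis
# must have extent 16, otherwise the lowered IR is dumped for inspection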
def compute(M_tiles, N_tiles, factor, dtype):
M = M_tiles * factor
N = N_tiles * factor
A = tvm.te.placeholder((N, M), name="A", dtype=dtype)
C = tvm.te.compute((N, M), lambda n, m: A[n, m], name="C")
s = tvm.te.create_schedule(C.op)
return s, A, C
def schedule(s, factor, padding, A, C):
C_local = s.cache_write(C, "local")
n, m = C.op.axis
bn, bm, ni, mi = s[C].tile(n, m, factor, factor)
nio, nii = s[C].split(ni, 2)
n = s[C].fuse(nii, mi)
C_shared = s.cache_write(C, "shared")
bn, bm, ni, mi = C_shared.op.axis
s[C_shared].storage_align(ni, factor * 2, padding)
n, m = s[C].op.axis
bn, bm, ni, mi = s[C].tile(n, m, factor, factor)
s[C].set_scope("global")
niio, niii = s[C].split(ni, 32)
s[C_shared].compute_at(s[C], niio)
return s
s, A, C = compute(2, 2, 128, "float16")
s = schedule(s, 128, 8, A, C)
bounds = tvm.te.schedule.InferBound(s)
check = bounds[s.stages[2].op.axis[2]].extent == 16
if not check:
print(tvm.lower(s, [A, C], simple_mode=True))
assert check
if __name__ == "__main__":
test_bound_tile_mod()
import tvm
from tvm import te
def test_scan():
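# ScanGetBody should collect the ops forming the scan body, and
# CreateAttachPath should report the attach path through the scan axis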
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: x[0, i], name="s_init")
x_trans = te.compute((m, n), lambda i, j: x[i, j] + 1, name="x_trans")
s_up1 = te.compute((m, n), lambda t, i: s_state[t - 1, i] + 1, name="up1")
s_update = te.compute((m, n), lambda t, i: s_up1[t, i] + x_trans[t, i], name="update")
s_scan = tvm.te.scan(s_init, s_update, s_state)
def test_getbody():
body = tvm.te.schedule.ScanGetBody(s_scan.op)
assert set(body) == set([s_scan.op, s_update.op, s_up1.op])
def test_attach_path():
s = te.create_schedule(s_scan.op)
s[x_trans].compute_at(s[s_update], s_update.op.axis[0])
apath = tvm.te.schedule.CreateAttachPath(s)
assert tuple(apath[s_update.op]) == tuple([s_scan.op.scan_axis])
assert tuple(apath[x_trans.op]) == tuple([s_update.op.axis[0], s_scan.op.scan_axis])
def test_fix_pt():
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.spatial_axis_[0]].value != 0
def test_scan_fix_point():
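# ScanFixPointAnalysis marks a spatial axis as a fixed point (1) only when the
# update reads the previous state at the same index along that axis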
m = te.var("m")
n = te.var("n")
l = te.var("l")
x = te.compute((l, m, n), lambda *i: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((l, m, n))
s_init = te.compute((1, m, n), lambda _, i, j: x[0, i, j], name="s_init")
def test_scan0():
s_update = te.compute(
(l, m, n), lambda t, i, j: x[t, j, i] + s_state[t - 1, i, j], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 1
assert fxpt[s_scan.op.spatial_axis_[1]].value == 1
def test_scan1():
s_update = te.compute(
(l, m, n), lambda t, i, j: x[t, j, i] + s_state[t - 1, j, i], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 0
assert fxpt[s_scan.op.spatial_axis_[1]].value == 0
def test_scan3_not_exact_reach():
s_h1 = te.compute((l, n, m), lambda t, j, i: s_state[t - 1, i, j], name="h1")
s_h2 = te.compute((l, m, n), lambda t, i, j: s_state[t - 1, i, 10] * 2, name="h1")
s_update = te.compute(
(l, m, n), lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 1
assert fxpt[s_scan.op.spatial_axis_[1]].value == 0
def test_scan4_reach_other():
s_h1 = te.compute((l, n, m), lambda t, j, i: s_state[t - 1, j, j], name="h1")
s_h2 = te.compute((l, m, n), lambda t, i, j: s_state[t - 1, i, j] * 2, name="h1")
s_update = te.compute(
(l, m, n), lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 0
assert fxpt[s_scan.op.spatial_axis_[1]].value == 0
def test_scan5_multi_output():
m = te.var("m")
n = te.var("n")
x1 = te.placeholder((m, n))
s1 = te.placeholder((m, n))
x2 = te.placeholder((m, n))
s2 = te.placeholder((m, n))
s1_init = te.compute((1, n), lambda _, i: x1[0, i])
s2_init = te.compute((1, n), lambda _, i: x2[0, i])
s1_update = te.compute((m, n), lambda t, i: s1[t - 1, i] + x1[t, i])
s2_update = te.compute((m, n), lambda t, i: x2[t, i] + s2[t - 1, i])
r0, r1 = tvm.te.scan([s1_init, s2_init], [s1_update, s2_update], [s1, s2])
body = tvm.te.schedule.ScanGetBody(r0.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(r0.op)
assert fxpt[r1.op.spatial_axis_[0]].value == 1
test_scan0()
test_scan1()
test_scan3_not_exact_reach()
test_scan4_reach_other()
test_scan5_multi_output()
def test_create_read_graph():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j])
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3)
g = tvm.te.schedule.CreateReadGraph([A2.op])
assert g[A2.op][0] == A1
assert g[A1.op][0] == A
post_order = tvm.te.schedule.PostDFSOrder([A2.op], g)
assert post_order[0] == A.op
assert post_order[1] == A1.op
if __name__ == "__main__":
test_scan()
test_create_read_graph()
test_scan_fix_point()
import tvm
from tvm import te
def test_lstm_cell_inline():
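# build one LSTM cell as a scan over time and inline all gate computations;
# the schedule should still lower cleanly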
num_step = 128
num_input = 256
num_hidden = 1152
batch_size = 4
X = te.placeholder((num_step - 1, batch_size, num_input), name="X")
Wi2h = te.placeholder((4, num_hidden, num_input), name="Wi2h")
Wh2h = te.placeholder((4, num_hidden, num_hidden), name="Wh2h")
s_state_h = te.placeholder((num_step, batch_size, num_hidden))
s_state_c = te.placeholder((num_step, batch_size, num_hidden))
s_init_c = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_c")
s_init_h = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_h")
k = te.reduce_axis((0, num_input), name="ki2h")
s_i2h = te.compute(
(num_step, 4, batch_size, num_hidden),
lambda t, x, i, j: te.sum(X[t - 1, i, k] * Wi2h[x, j, k], axis=k),
name="s_i2h",
)
k = te.reduce_axis((0, num_hidden), name="ki2h")
s_h2h = te.compute(
(num_step, 4, batch_size, num_hidden),
lambda t, x, i, j: te.sum(s_state_h[t - 1, i, k] * Wh2h[x, j, k], axis=k),
name="s_h2h",
)
gates = te.compute(s_i2h.shape, lambda *i: s_i2h(*i) + s_h2h(*i), name="gates")
gshape = (num_step, batch_size, num_hidden)
in_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, 0, i, j]), name="in_gate")
in_transform = te.compute(
gshape, lambda t, i, j: te.tanh(gates[t, 1, i, j]), name="in_transform"
)
forget_gate = te.compute(
gshape, lambda t, i, j: te.sigmoid(gates[t, 2, i, j]), name="forget_gate"
)
out_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, 3, i, j]), name="out_gate")
next_c = te.compute(
gshape,
lambda t, i, j: forget_gate[t, i, j] * s_state_c[t - 1, i, j]
+ in_gate[t, i, j] * in_transform[t, i, j],
name="next_c",
)
next_h = te.compute(
gshape, lambda t, i, j: out_gate[t, i, j] * te.tanh(next_c[t, i, j]), name="next_h"
)
update_c = te.compute(gshape, lambda *i: next_c(*i), name="update_c")
update_h = te.compute(gshape, lambda *i: next_h(*i), name="update_h")
scan_h, scan_c = tvm.te.scan(
[s_init_h, s_init_c],
[update_h, update_c],
[s_state_h, s_state_c],
inputs=[X],
name="lstm_scan",
)
s = te.create_schedule(scan_h.op)
s[gates].compute_inline()
s[in_gate].compute_inline()
s[in_transform].compute_inline()
s[forget_gate].compute_inline()
s[out_gate].compute_inline()
tvm.lower(s, [X, Wi2h, Wh2h, scan_h, scan_c])
if __name__ == "__main__":
test_lstm_cell_inline()
import numpy as np
import tvm
from tvm import te
from tvm.driver.build_module import schedule_to_module
def test_const():
x = tvm.te.const(1, "int32")
assert x.dtype == "int32"
assert isinstance(x, tvm.tir.IntImm)
def test_schedule0():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
s = te.create_schedule(A1.op)
mod = schedule_to_module(s, [A, A1])
assert isinstance(mod["main"], tvm.tir.PrimFunc)
def test_schedule1():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
s = te.create_schedule(A1.op)
xo, xi = s[A1].split(A1.op.axis[0], 8)
s[A1].pragma(xo, "auto_unroll_max_step", 10)
mod = schedule_to_module(s, [A, A1])
assert isinstance(mod["main"], tvm.tir.PrimFunc)
def test_schedule2():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
xo, xi = s[A2].split(A2.op.axis[0], 8)
s[A1].compute_at(s[A2], xo)
mod = schedule_to_module(s, [A, A2])
assert isinstance(mod["main"], tvm.tir.PrimFunc)
def test_schedule_scan():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: x[0, i])
s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + x[t, i])
res = tvm.te.scan(s_init, s_update, s_state)
assert tuple(res.shape) == (m, n)
s = te.create_schedule(res.op)
s = s.normalize()
ir = tvm.lower(s, [s_state], simple_mode=True)
bounds = tvm.te.schedule.InferBound(s)
assert bounds[res.op.scan_axis].min.value == 1
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_inline_multi_reduce():
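# inline a producer feeding a two-output (argmax-style) commutative reduction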
def argmax_comp(x, y):
idx = tvm.tir.Select((x[1] >= y[1]), x[0], y[0])
val = tvm.tir.Select((x[1] >= y[1]), x[1], y[1])
return idx, val
def argmax_init(idx_typ, val_typ):
return tvm.tir.const(-1, idx_typ), tvm.te.min_value(val_typ)
argmax = te.comm_reducer(argmax_comp, argmax_init, name="argmax")
m = te.var("m")
n = te.var("n")
val = te.placeholder((m, n), name="val", dtype="float32")
val1 = te.compute((m, n), lambda i, j: val[i, j] + 1, name="val1")
val2 = te.compute((m, n), lambda i, j: te.exp(val1[i, j]), name="val2")
k = te.reduce_axis((0, n), "k")
T_idx, T_val = te.compute((m,), lambda i: argmax((k.var, val2[i, k]), axis=k), name="T")
s = te.create_schedule(T_idx.op)
s[val1].compute_inline()
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_auto_inline():
def elemwise():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m, n), name="A")
B = te.placeholder((m, n), name="B")
C = te.placeholder((m, n), name="C")
T1 = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="T1")
T2 = te.compute((m, n), lambda i, j: T1(i, j) + C(i, j), name="T2")
return te.create_schedule(T2.op), T1
def broadcast():
m = te.var("m")
n = te.var("n")
A = te.placeholder((1,), name="A")
B = te.placeholder((m, n), name="B")
C = te.placeholder((m, n), name="C")
T1 = te.compute((m, n), lambda i, j: A(0) * B(i, j), name="T1", tag="broadcast")
T2 = te.compute((m, n), lambda i, j: T1(i, j) + C(i, j), name="T2")
return te.create_schedule(T2.op), T1
def injective():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m,), name="A")
B = te.placeholder((m, n), name="B")
C = te.placeholder((m, n), name="C")
T1 = te.compute((m, n), lambda i, j: A(i) * B(i, j), name="T1")
T2 = te.compute((m, n), lambda i, j: T1(i, j) + C(i, j), name="T2")
return te.create_schedule(T2.op), T1
def check_auto_inline(schedule_func, auto_inline_func):
s, T1 = schedule_func()
assert s[T1].attach_type == 1
auto_inline_func(s)
assert s[T1].attach_type == 2
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
check_auto_inline(elemwise, tvm.te.schedule.AutoInlineElemWise)
check_auto_inline(broadcast, tvm.te.schedule.AutoInlineBroadcast)
check_auto_inline(injective, tvm.te.schedule.AutoInlineInjective)
def test_schedule_const_bound():
n = 128
A = te.placeholder((n,), name="A")
A1 = te.compute((n,), lambda i: A[i] + 1, name="A1")
s = te.create_schedule(A1.op)
xo, xi = s[A1].split(A1.op.axis[0], 8)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_inline_mixed():
n = te.var("n")
A = te.placeholder((n,), name="A")
A1 = te.compute(A.shape, lambda *i: A(*i) + 1, name="A1")
A2 = te.compute(A.shape, lambda *i: A1(*i) + 2, name="A2")
C = te.compute((n,), lambda i: A2[i] + A1[i], name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=8)
s[A1].compute_at(s[C], xo)
s[A2].compute_inline()
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def check(x):
if isinstance(x, tvm.tir.Call):
assert x.func != A2
tvm.tir.stmt_functor.post_order_visit(s[C].op.body[0], check)
def test_scan_inline1():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state1 = te.placeholder((m, n))
s_state2 = te.placeholder((m, n))
s_init1 = te.compute((1, n), lambda _, i: x[0, i])
s_init2 = te.compute((1, n), lambda _, i: x[0, i])
s_x1 = te.compute((m, n), lambda t, i: s_state1[t - 1, i] + x[t, i], name="x1")
s_x2 = te.compute((m, n), lambda t, i: s_state2[t - 1, i] + 1, name="x2")
s_update1 = te.compute((m, n), lambda t, i: s_x1[t, i], "u1")
s_update2 = te.compute((m, n), lambda t, i: s_x2[t, i], "u2")
res1, res2 = tvm.te.scan([s_init1, s_init2], [s_update1, s_update2], [s_state1, s_state2])
s = te.create_schedule(res1.op)
s[s_x1].compute_inline()
stmt = tvm.lower(s, [x, res1, res2])
def test_scan_inline2():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state1 = te.placeholder((m, n))
s_state2 = te.placeholder((m, n))
s_init1 = te.compute((1, n), lambda _, i: x[0, i])
s_init2 = te.compute((1, n), lambda _, i: x[0, i])
s_xx = te.compute((m, n), lambda t, i: s_state1[t - 1, i] + x[t, i], name="xx")
s_x1 = te.compute((m, n), lambda t, i: s_xx[t, i] + 1, name="x1")
s_x2 = te.compute((m, n), lambda t, i: s_xx[t, i] + s_state2[t - 1, 2], name="x2")
s_update1 = te.compute((m, n), lambda t, i: s_x1[t, i], "u1")
s_update2 = te.compute((m, n), lambda t, i: s_x2[t, i], "u2")
res1, res2 = tvm.te.scan([s_init1, s_init2], [s_update1, s_update2], [s_state1, s_state2])
s = te.create_schedule(res1.op)
s[s_xx].compute_inline()
s[s_x1].compute_inline()
s[s_x2].compute_inline()
stmt = tvm.lower(s, [x, res1, res2])
def test_schedule_cache():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m, n), name="A")
B = te.placeholder((m, n), name="B")
C = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="C")
s = te.create_schedule(C.op)
AA = s.cache_read(A, "shared", readers=[C])
CC = s.cache_write(C, "shared")
s[AA].compute_at(s[CC], CC.op.axis[0])
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_middle_cache():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m, n), name="A")
B = te.placeholder((m, n), name="B")
C = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="C")
D = te.compute((m, n), lambda i, j: C(i, j), name="D")
s = te.create_schedule(D.op)
AA = s.cache_read(A, "local", readers=[C])
BB = s.cache_read(B, "local", readers=[C])
CC = s.cache_read(C, "local", readers=[D])
DD = s.cache_write(D, "local")
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_cache_relayout1():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m, n), name="A")
B = te.placeholder((m, n), name="B")
C = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="C")
s = te.create_schedule(C.op)
s[C].reorder(C.op.axis[1], C.op.axis[0])
CC = s.cache_write(C, "global")
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_cache_relayout2():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m * 4, n), name="A")
B = te.placeholder((m * 4, n), name="B")
C = te.compute(A.shape, lambda i, j: A(i, j) * B(i, j), name="C")
s = te.create_schedule(C.op)
x, y = C.op.axis
xo, xi = s[C].split(x, factor=4)
s[C].reorder(xo, y, xi)
CC = s.cache_write(C, "global")
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_cache_relayout3():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m * 4, n), name="A")
B = te.placeholder((m * 4, n), name="B")
k = te.reduce_axis((0, n), "k")
C = te.compute((A.shape[0],), lambda i: te.sum(A(i, k) * B(i, k), axis=k), name="C")
s = te.create_schedule(C.op)
x = C.op.axis[0]
xo, xi = s[C].split(x, factor=4)
CC = s.cache_write(C, "global")
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_cache_relayout4():
def _compute(*indice):
return A(*indice) + 1, B(*indice) / 2
m = te.var("m")
n = te.var("n")
A = te.placeholder((m * 4, n), name="A")
B = te.placeholder((m * 4, n), name="B")
C1, C2 = te.compute(A.shape, _compute, name="C")
s = te.create_schedule([C1.op, C2.op])
C1_cache, C2_cache = s.cache_write([C1, C2], "local")
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def intrin_gemv(m, n):
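# GEMV tensor intrinsic with explicit body/reset/update packed calls and a
# strided buffer declared for the weight matrix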
w = te.placeholder((m, n), name="w")
x = te.placeholder((n,), name="x")
k = te.reduce_axis((0, n), name="k")
z = te.compute((m,), lambda i: te.sum(w[i, k] * x[k], axis=k), name="z")
Wb = tvm.tir.decl_buffer(
w.shape, w.dtype, name="W", offset_factor=16, strides=[te.var("ldw"), 1]
)
def intrin_func(ins, outs):
ww, xx = ins
zz = outs[0]
ww_ptr = ww.access_ptr("r")
xx_ptr = xx.access_ptr("r")
zz_ptr = zz.access_ptr("w")
body = tvm.tir.call_packed("gemm", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
reset = tvm.tir.call_packed("fill_zero", zz_ptr, n)
update = tvm.tir.call_packed("gemv_add", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
return body, reset, update
buffer_params = {"data_alignment": 16, "offset_factor": 16}
return te.decl_tensor_intrin(
z.op, intrin_func, binds={w: Wb}, default_buffer_params=buffer_params
)
def test_schedule_tensor_compute1():
M, N, L = 2048, 1024, 512
factor, rfactor = 16, 16
A = te.placeholder((N // factor, L // rfactor, factor, rfactor), name="A")
B = te.placeholder((M, L // rfactor, rfactor), name="B")
k = te.reduce_axis((0, L // rfactor), name="k")
gemv = intrin_gemv(factor, rfactor)
C = te.compute(
(N // factor, M, factor),
lambda i, j: gemv(A[i, k, 0:factor, 0:factor], B[j, k, 0:rfactor], reduce_axis=k),
name="C",
)
s = te.create_schedule(C.op)
ai, aj, ax = s[C].op.axis
aio, aii = s[C].split(ai, 16)
s[C].reorder(aio, aj, aii)
aioo, ajo, aioi, aji = s[C].tile(aio, aj, 16, 4)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def intrin_vadd(n, cache_read=False, cache_write=False):
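# vector-add tensor intrinsic; optional local-scope buffer binds model
# cache_read/cache_write behavior on the intrinsic's inputs and output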
scope_ubuf = "local"
dtype = "float32"
x = te.placeholder((n,), dtype=dtype, name="vx")
y = te.placeholder((n,), dtype=dtype, name="vy")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
s = te.create_schedule(z.op)
def create_buffer(t):
return tvm.tir.decl_buffer(
t.shape, t.dtype, name="W" + t.name, scope=scope_ubuf, offset_factor=16
)
binds = {}
if cache_read:
binds[x] = create_buffer(x)
binds[y] = create_buffer(y)
if cache_write:
binds[z] = create_buffer(z)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
outs[0].dtype,
"vadd",
ins[0].access_ptr("r"),
ins[1].access_ptr("r"),
outs[0].access_ptr("wr"),
)
)
return ib.get()
return te.decl_tensor_intrin(
z.op, intrin_func, binds=binds, default_buffer_params={"offset_factor": 16}
)
def test_schedule_tensor_compute2():
M = 1024
factor = 16
dtype = "float32"
scope_ubuf = "local"
A = te.placeholder((M // factor, factor), name="A", dtype=dtype)
B = te.placeholder((M // factor, factor), name="B", dtype=dtype)
vadd = intrin_vadd(factor, True, True)
C = te.compute((M // factor, factor), lambda i: vadd(A[i, 0:factor], B[i, 0:factor]), name="C")
s = te.create_schedule(C.op)
AL = s.cache_read(A, scope_ubuf, C)
BL = s.cache_read(B, scope_ubuf, C)
CL = s.cache_write(C, scope_ubuf)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_tensor_compute3():
M = 1024
factor = 16
dtype = "float32"
A = te.placeholder((M // factor, factor), name="A", dtype=dtype)
B = te.placeholder((M // factor, factor), name="B", dtype=dtype)
Bi = te.compute((M // factor, factor), lambda i, j: B[i, j] + 5, name="Bi")
vadd = intrin_vadd(factor)
C = te.compute((M // factor, factor), lambda i: vadd(A[i, 0:factor], Bi[i, 0:factor]), name="C")
s = te.create_schedule(C.op)
s[Bi].compute_at(s[C], C.op.axis[0])
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_loop_dep_reduce():
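# a reduction whose extent depends on the surrounding loop variable must still build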
X = te.placeholder(shape=(10,), name="x")
def f(n):
rv = te.reduce_axis((0, n))
return te.sum(X[rv], axis=rv)
Y = te.compute(X.shape, f, name="y")
s = te.create_schedule([Y.op])
f = tvm.build(s, [X, Y])
def test_loop_dep_reduce_cache_write():
X = te.placeholder(shape=(10,), name="x")
def f(n):
rv = te.reduce_axis((0, n))
init = lambda dtype: tvm.tir.Select(n > 1, tvm.tir.const(0, dtype), n.astype(dtype))
sum = te.comm_reducer(lambda x, y: tvm.te.max(x + y, n.astype("float32")), init, name="sum")
return sum(X[rv], axis=rv)
Y = te.compute(X.shape, f, name="y")
s = te.create_schedule([Y.op])
s.cache_write(Y, "local")
f = tvm.build(s, [X, Y])
def test_reduction_and_dummy_fuse_split():
n = 10
X = te.placeholder(shape=(n,), dtype="int32", name="X")
k = te.reduce_axis((0, n))
Y = te.compute((), lambda: te.sum(X[k], k), name="Y")
s = te.create_schedule([Y.op])
ax = s[Y.op].fuse(*Y.op.axis)
axo, axi = s[Y.op].split(ax, nparts=20)
f = tvm.build(s, [Y, X])
args = [tvm.nd.empty((), "int32")] + [tvm.nd.array(np.ones((n,), dtype="int32"))]
f(*args)
assert args[0].numpy() == n
n = 10
X = te.placeholder(shape=(n,), dtype="int32", name="X")
k = te.reduce_axis((0, n))
Y = te.compute((n,), lambda i: te.sum(X[k], k), name="Y")
s = te.create_schedule([Y.op])
ax = s[Y.op].fuse(*(list(Y.op.axis) + list(Y.op.reduce_axis)))
f = tvm.build(s, [Y, X])
args = [tvm.nd.array(np.ones((n,), dtype="int32"))] + [
tvm.nd.array(np.ones((n,), dtype="int32"))
]
f(*args)
assert np.all(args[0].numpy() == n)
def test_schedule_compute_inline():
shape = [10, 1024]
A = te.placeholder(shape, name="A")
B = te.placeholder(shape, name="B")
C = te.compute(shape, lambda *index: A(*index) + B(*index), name="C")
def _compute(*index):
return C(*index), C(*index) * B(*index)
F, E = te.compute(shape, _compute, name="F")
s = te.create_schedule([F.op, E.op])
AL = s.cache_read(A, "local", [C])
BL = s.cache_read(B, "local", [C, E])
CL = s.cache_write(C, "local")
FL, EL = s.cache_write([F, E], "local")
s[C].compute_inline()
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_local_stage_predicate():
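# a local/shared stage bound to the same thread index as its consumer should
# not require an if_then_else guard in the lowered body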
m = 1
n = 3
p = 2
A = tvm.te.placeholder((m, n, p), name="A")
B = tvm.te.compute((m, n, p), lambda bi, bj, bk: A[bi, bj, bk], name="B")
C = tvm.te.compute((m, n, p), lambda ci, cj, ck: B[ci, cj, ck], name="C")
by = tvm.te.thread_axis("blockIdx.y")
tx = tvm.te.thread_axis("threadIdx.x")
vx = tvm.te.thread_axis("vthread")
def schedule(thread_tag, mem_scope):
s = tvm.te.create_schedule(C.op)
s[B].compute_at(s[C], s[C].op.axis[0])
s[B].set_scope(mem_scope)
bno, bni = s[B].split(s[B].op.axis[1], n)
bx = tvm.te.thread_axis("blockIdx.x")
s[C].bind(s[C].op.axis[0], bx)
s[C].bind(s[C].op.axis[1], thread_tag)
s[B].bind(bni, thread_tag)
return s
def collect_visit(stmt, f):
ret = []
tvm.tir.stmt_functor.post_order_visit(stmt, lambda x: ret.append(f(x)))
return ret
s = schedule(tx, "local")
lowered_body = tvm.lower(s, [A, C])["main"].body
assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse)))
s = schedule(vx, "local")
lowered_body = tvm.lower(s, [A, C])["main"].body
assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse)))
s = schedule(by, "shared")
lowered_body = tvm.lower(s, [A, C])["main"].body
assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_local_stage_predicate2():
A = tvm.te.placeholder((128,), name="A")
B = tvm.te.compute((128,), lambda bi: A[bi] + 1, name="B")
C = tvm.te.compute((128,), lambda ci: B[ci] + 2, name="C")
s = tvm.te.create_schedule(C.op)
AA = s.cache_read(A, "local", [B])
s[B].set_scope("shared")
block_x = tvm.te.thread_axis("blockIdx.x")
thread_x = tvm.te.thread_axis((0, 32), "threadIdx.x")
oc, ic = s[C].split(s[C].op.axis[0], factor=64)
ooc, ioc = s[C].split(oc, factor=2)
oic, iic = s[C].split(ic, factor=32)
s[C].bind(ooc, block_x)
s[C].bind(iic, thread_x)
s[B].compute_at(s[C], ioc)
ob, ib = s[B].split(s[B].op.axis[0], factor=32)
s[B].bind(ib, thread_x)
s[AA].compute_root()
s[AA].compute_at(s[C], ooc)
oaa, iaa = s[AA].split(s[AA].op.axis[0], factor=32)
s[AA].bind(iaa, thread_x)
lowered_body = tvm.lower(s, [A, C])["main"].body
def collect_visit(stmt, f):
ret = []
tvm.tir.stmt_functor.post_order_visit(stmt, lambda x: ret.append(f(x)))
return ret
def visit_stmt(op):
if isinstance(op, tvm.tir.Allocate):
return op.extents[0].value == 97
return False
assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse)))
assert any(collect_visit(lowered_body, visit_stmt))
if __name__ == "__main__":
test_loop_dep_reduce()
test_loop_dep_reduce_cache_write()
test_schedule_middle_cache()
test_inline_multi_reduce()
test_schedule_cache_relayout4()
test_schedule_cache_relayout3()
test_schedule_cache_relayout2()
test_schedule_cache_relayout1()
test_schedule_const_bound()
test_scan_inline1()
test_scan_inline2()
test_inline_mixed()
test_auto_inline()
test_schedule_scan()
test_schedule0()
test_schedule1()
test_schedule2()
test_schedule_cache()
test_schedule_tensor_compute1()
test_schedule_tensor_compute2()
test_schedule_tensor_compute3()
test_reduction_and_dummy_fuse_split()
test_schedule_compute_inline()
test_local_stage_predicate()
test_local_stage_predicate2()
import tvm
from tvm import te
from tvm import topi
import numpy as np
import tvm.testing
def tensor_core_matmul(warp_tile_m=16, m=64, n=32, l=96):
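# fp16 matmul scheduled for Tensor Cores: shared and local read caches for A/B,
# a local write cache for C, and the "tensor_core" pragma on the outer k loop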
A = te.placeholder((n, l), name="A", dtype="float16")
B = te.placeholder((l, m), name="B", dtype="float16")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m), lambda i, j: te.sum(A[i, k].astype("float32") * B[k, j].astype("float32"), axis=k)
)
s = te.create_schedule(C.op)
y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CL = s.cache_write(C, "local")
bx = 4
by = 32
step_k = 8
v = 4
TX = 8
TY = 1
tile_x = bx * TX
tile_y = by * TY
WX = min(warp_tile_m, tile_x)
tile_k = 16
vthread = 1
yo, ty = s[C].split(y, tile_y * vthread)
vy, ty = s[C].split(ty, tile_y)
ty, yi = s[C].split(ty, TY)
xo, xi = s[C].split(x, tile_x)
tz, xi = s[C].split(xi, WX)
tx, xi = s[C].split(xi, TX)
ko, ki = s[CL].split(k, step_k * tile_k)
kl, ki = s[CL].split(ki, tile_k)
s[C].reorder(yo, xo, tz, ty, tx, yi, xi)
s[C].bind(yo, te.thread_axis("blockIdx.y"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))
s[CL].compute_at(s[C], tx)
yo, xo = CL.op.axis
s[CL].reorder(ko, kl, ki, yo, xo)
s[AA].compute_at(s[CL], ko)
xo, xi = s[AA].split(s[AA].op.axis[1], factor=bx * v)
tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
tx, vec = s[AA].split(tx, factor=v)
fused = s[AA].fuse(s[AA].op.axis[0], xo)
_, ty = s[AA].split(fused, factor=by)
s[AA].bind(ty, te.thread_axis("threadIdx.y"))
s[AA].bind(tz, te.thread_axis("threadIdx.z"))
s[AA].bind(tx, te.thread_axis("threadIdx.x"))
s[AA].vectorize(vec)
s[BB].compute_at(s[CL], ko)
xo, xi = s[BB].split(s[BB].op.axis[1], factor=bx * v)
tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
tx, vec = s[BB].split(tx, factor=v)
fused = s[BB].fuse(s[BB].op.axis[0], xo)
_, ty = s[BB].split(fused, factor=by)
s[BB].bind(ty, te.thread_axis("threadIdx.y"))
s[BB].bind(tz, te.thread_axis("threadIdx.z"))
s[BB].bind(tx, te.thread_axis("threadIdx.x"))
s[BB].vectorize(vec)
s[AL].compute_at(s[CL], kl)
s[BL].compute_at(s[CL], kl)
s[CL].pragma(ko, "tensor_core")
func = tvm.build(s, [A, B, C], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(n, l)).astype(A.dtype)
b_np = np.random.uniform(size=(l, m)).astype(B.dtype)
c_np = np.zeros((n, m), dtype=np.float32)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
func(a, b, c)
evaluator = func.time_evaluator(func.entry_name, dev, number=3)
print("gemm m=%d n=%d k=%d: %f ms" % (m, n, l, evaluator(a, b, c).mean * 1e3))
c_np = np.dot(a_np, b_np)
np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3)
def tensor_core_batch_matmul(warp_tile_m=16, m=64, n=32, l=96, batch=2):
A = te.placeholder((batch, n, l), name="A", dtype="float16")
B = te.placeholder((batch, l, m), name="B", dtype="float16")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(batch, n, m), lambda b, i, j: te.sum((A[b, i, k] * B[b, k, j]).astype("float32"), axis=k)
)
s = te.create_schedule(C.op)
z, y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CL = s.cache_write(C, "local")
bx = 2
by = 32
step_k = 8
v = 4
TX = 8
TY = 1
tile_x = bx * TX
tile_y = by * TY
WX = min(warp_tile_m, tile_x)
tile_k = 16
vthread = 1
yo, ty = s[C].split(y, tile_y * vthread)
vy, ty = s[C].split(ty, tile_y)
ty, yi = s[C].split(ty, TY)
xo, xi = s[C].split(x, tile_x)
tz, xi = s[C].split(xi, WX)
tx, xi = s[C].split(xi, TX)
ko, ki = s[CL].split(k, step_k * tile_k)
kl, ki = s[CL].split(ki, tile_k)
s[C].reorder(z, yo, xo, tz, ty, tx, yi, xi)
s[C].bind(z, te.thread_axis("blockIdx.z"))
s[C].bind(yo, te.thread_axis("blockIdx.y"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))
s[CL].compute_at(s[C], tx)
zo, yo, xo = CL.op.axis
s[CL].reorder(ko, kl, ki, zo, yo, xo)
s[AA].compute_at(s[CL], ko)
xo, xi = s[AA].split(s[AA].op.axis[2], factor=bx * v)
tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
tx, vec = s[AA].split(tx, factor=v)
fused = s[AA].fuse(s[AA].op.axis[1], xo)
_, ty = s[AA].split(fused, factor=by)
s[AA].bind(ty, te.thread_axis("threadIdx.y"))
s[AA].bind(tz, te.thread_axis("threadIdx.z"))
s[AA].bind(tx, te.thread_axis("threadIdx.x"))
s[AA].vectorize(vec)
s[BB].compute_at(s[CL], ko)
xo, xi = s[BB].split(s[BB].op.axis[2], factor=bx * v)
tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
tx, vec = s[BB].split(tx, factor=v)
fused = s[BB].fuse(s[BB].op.axis[1], xo)
_, ty = s[BB].split(fused, factor=by)
s[BB].bind(ty, te.thread_axis("threadIdx.y"))
s[BB].bind(tz, te.thread_axis("threadIdx.z"))
s[BB].bind(tx, te.thread_axis("threadIdx.x"))
s[BB].vectorize(vec)
s[AL].compute_at(s[CL], kl)
s[BL].compute_at(s[CL], kl)
s[CL].pragma(ko, "tensor_core")
func = tvm.build(s, [A, B, C], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(batch, n, l)).astype(A.dtype)
b_np = np.random.uniform(size=(batch, l, m)).astype(B.dtype)
c_np = np.zeros((batch, n, m), dtype=np.float32)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((batch, n, m), dtype=C.dtype), dev)
func(a, b, c)
evaluator = func.time_evaluator(func.entry_name, dev, number=3)
print(
"batch gemm m=%d n=%d k=%d batch=%d: %f ms"
% (m, n, l, batch, evaluator(a, b, c).mean * 1e3)
)
for bs in range(batch):
c_np[bs, :, :] = np.dot(a_np[bs, :, :], b_np[bs, :, :])
np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3)
@tvm.testing.requires_tensorcore
def test_tensor_core_matmul():
tensor_core_matmul(16)
tensor_core_matmul(8)
tensor_core_matmul(32)
@tvm.testing.requires_tensorcore
def test_tensor_core_batch_matmul():
tensor_core_batch_matmul()
if __name__ == "__main__":
test_tensor_core_matmul()
test_tensor_core_batch_matmul()
import tvm
from tvm import te
import numpy as np
from tvm.topi.testing import conv2d_nhwc_python