import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.topi.testing import conv2d_nhwc_python
VERIFY = True
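# The three helpers below declare tensor intrinsics that map a small compute
# pattern onto the CUDA wmma API (tir.tvm_load_matrix_sync and friends), so
# that s[...].tensorize(...) can replace the matching loop nest with a single
# Tensor Core fragment operation.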
def intrin_wmma_load_matrix(shape, scope):
n, m, l = shape
if scope == "wmma.matrix_a":
row, col = n, l
elif scope == "wmma.matrix_b":
row, col = l, m
A = te.placeholder((row, col), name="A", dtype="float16")
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, scope="shared", data_alignment=32, offset_factor=row * col
)
C = te.compute((row, col), lambda i, j: A[i, j], name="C")
BC = tvm.tir.decl_buffer(
C.shape, C.dtype, scope=scope, data_alignment=32, offset_factor=row * col
)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_load_matrix_sync",
BC.data,
n,
m,
l,
BC.elem_offset // (row * col),
BA.access_ptr("r"),
col,
"row_major",
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
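# GEMM intrinsic for a single wmma fragment: an n x m accumulator updated by
# an (n, l) x (l, m) product. intrin_func returns a (body, reset, update)
# triple; here body and update are the same tvm_mma_sync call, and reset zeros
# the accumulator with tvm_fill_fragment. The elem_offset // (n * m) terms
# turn element offsets into fragment indices.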
def intrin_wmma_gemm(shape):
n, m, l = shape
A = te.placeholder((n, l), name="A", dtype="float16")
B = te.placeholder((l, m), name="B", dtype="float16")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m),
lambda ii, jj: te.sum(A[ii, k].astype("float") * B[k, jj].astype("float"), axis=k),
name="C",
)
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, name="BA", scope="wmma.matrix_a", data_alignment=32, offset_factor=n * l
)
BB = tvm.tir.decl_buffer(
B.shape, B.dtype, name="BB", scope="wmma.matrix_b", data_alignment=32, offset_factor=l * m
)
BC = tvm.tir.decl_buffer(
C.shape,
C.dtype,
name="BC",
scope="wmma.accumulator",
data_alignment=32,
offset_factor=n * m,
)
def intrin_func(ins, outs):
BA, BB = ins
(BC,) = outs
def init():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_fill_fragment",
BC.data,
n,
m,
l,
BC.elem_offset // (n * m),
0.0,
)
)
return ib.get()
def update():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_mma_sync",
BC.data,
BC.elem_offset // (n * m),
BA.data,
BA.elem_offset // (n * l),
BB.data,
BB.elem_offset // (l * m),
BC.data,
BC.elem_offset // (n * m),
)
)
return ib.get()
return update(), init(), update()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, B: BB, C: BC})
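# Store intrinsic: copies one wmma.accumulator fragment back to global memory
# with tvm_store_matrix_sync.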
def intrin_wmma_store_matrix(shape):
n, m, l = shape
A = te.placeholder((n, m), name="A", dtype="float32")
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, scope="wmma.accumulator", data_alignment=32, offset_factor=n * m
)
C = te.compute((n, m), lambda i, j: A[i, j], name="C")
BC = tvm.tir.decl_buffer(
C.shape, C.dtype, scope="global", data_alignment=32, offset_factor=n * m
)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_store_matrix_sync",
BA.data,
n,
m,
l,
BA.elem_offset // (n * m),
BC.access_ptr("w"),
m,
"row_major",
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
@tvm.testing.requires_tensorcore
def test_tensor_core_batch_matmal():
batch_size = 4
n = 512
m, l = n, n
assert n % 32 == 0
assert m % 8 == 0
assert l % 16 == 0
nn, mm, ll = n // 32, m // 8, l // 16
A = te.placeholder((batch_size, nn, ll, 32, 16), name="A", dtype="float16")
B = te.placeholder((batch_size, ll, mm, 16, 8), name="B", dtype="float16")
k1 = te.reduce_axis((0, ll), name="k1")
k2 = te.reduce_axis((0, 16), name="k2")
C = te.compute(
(batch_size, nn, mm, 32, 8),
lambda b, i, j, ii, jj: te.sum(
A[b, i, k1, ii, k2].astype("float") * B[b, k1, j, k2, jj].astype("float"), axis=[k1, k2]
),
name="Fragment_C",
)
s = te.create_schedule(C.op)
warp_size = 32
kernel_size = 16
block_row_warps = 2
block_col_warps = 4
warp_row_tiles = 4
warp_col_tiles = 2
chunk = 4
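# Tiling scheme: each thread block holds block_row_warps x block_col_warps
# warps, each warp computes warp_row_tiles x warp_col_tiles accumulator
# fragments, and `chunk` fragments of the reduction axis are staged in shared
# memory per iteration.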
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
AS = s.cache_read(A, "shared", [C])
BS = s.cache_read(B, "shared", [C])
AF = s.cache_read(AS, "wmma.matrix_a", [C])
BF = s.cache_read(BS, "wmma.matrix_b", [C])
CF = s.cache_write(C, "wmma.accumulator")
b, i, j, kernel_i, kernel_j = s[C].op.axis
i, ii = s[C].split(i, factor=warp_row_tiles)
block_i, i = s[C].split(i, factor=block_row_warps)
j, jj = s[C].split(j, factor=warp_col_tiles)
block_j, j = s[C].split(j, factor=block_col_warps)
s[C].reorder(block_i, block_j, i, j, ii, jj, kernel_i, kernel_j)
s[C].bind(b, block_z)
s[C].bind(block_i, block_x)
s[C].bind(block_j, block_y)
s[C].bind(i, thread_y)
s[C].bind(j, thread_z)
s[CF].compute_at(s[C], j)
b, warp_i, warp_j, _i, _j = s[CF].op.axis
k, _k = CF.op.reduce_axis
ko, ki = s[CF].split(k, factor=chunk)
s[CF].reorder(ko, ki, warp_i, warp_j, _i, _j, _k)
s[AF].compute_at(s[CF], ki)
s[BF].compute_at(s[CF], ki)
s[AS].compute_at(s[CF], ko)
b, xo, yo, xi, yi = AS.op.axis
tx, xo = s[AS].split(xo, nparts=block_row_warps)
ty, yo = s[AS].split(yo, nparts=block_col_warps)
t = s[AS].fuse(xi, yi)
to, ti = s[AS].split(t, nparts=warp_size)
s[AS].bind(tx, thread_y)
s[AS].bind(ty, thread_z)
s[AS].bind(to, thread_x)
s[BS].compute_at(s[CF], ko)
b, xo, yo, xi, yi = BS.op.axis
tx, xo = s[BS].split(xo, nparts=block_row_warps)
ty, yo = s[BS].split(yo, nparts=block_col_warps)
t = s[BS].fuse(xi, yi)
to, ti = s[BS].split(t, nparts=warp_size)
s[BS].bind(tx, thread_y)
s[BS].bind(ty, thread_z)
s[BS].bind(to, thread_x)
s[AF].tensorize(AF.op.axis[-2], intrin_wmma_load_matrix((32, 8, 16), "wmma.matrix_a"))
s[BF].tensorize(BF.op.axis[-2], intrin_wmma_load_matrix((32, 8, 16), "wmma.matrix_b"))
s[C].tensorize(kernel_i, intrin_wmma_store_matrix((32, 8, 16)))
s[CF].tensorize(_i, intrin_wmma_gemm((32, 8, 16)))
func = tvm.build(s, [A, B, C], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(batch_size, nn, ll, 32, 16)).astype(A.dtype)
b_np = np.random.uniform(size=(batch_size, ll, mm, 16, 8)).astype(B.dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((batch_size, nn, mm, 32, 8), dtype=C.dtype), dev)
func(a, b, c)
evaluator = func.time_evaluator(func.entry_name, dev, number=3)
print("gemm with tensor core: %f ms" % (evaluator(a, b, c).mean * 1e3))
if VERIFY:
func(a, b, c)
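# Un-pack the blocked (batch, n // 32, l // 16, 32, 16) layouts back into
# plain (batch, n, n) matrices before comparing against numpy.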
a_np = a_np.transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n)
b_np = b_np.transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n)
c_np = c.numpy().transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n)
np.testing.assert_allclose(
c_np, np.matmul(a_np.astype(C.dtype), b_np.astype(C.dtype)), rtol=1e-4, atol=1e-4
)
@tvm.testing.requires_tensorcore
def test_tensor_core_batch_conv():
batch_size = 32
height = 14
width = 14
in_channels = 32
out_channels = 64
kernel_h = 3
kernel_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
block_size = 16
block_row_warps = 2
block_col_warps = 4
warp_row_tiles = 4
warp_col_tiles = 2
warp_size = 32
chunk = 2
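# Data, kernel, and output use blocked layouts: the batch and channel
# dimensions are split into block_size x block_size inner tiles so that the
# innermost two axes line up with the (16, 16, 16) wmma fragment shape.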
data_shape = (
batch_size // block_size,
height,
width,
in_channels // block_size,
block_size,
block_size,
)
kernel_shape = (
kernel_h,
kernel_w,
in_channels // block_size,
out_channels // block_size,
block_size,
block_size,
)
output_shape = (
batch_size // block_size,
height,
width,
out_channels // block_size,
block_size,
block_size,
)
assert batch_size % block_size == 0
assert in_channels % block_size == 0
assert out_channels % block_size == 0
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
ic = te.reduce_axis((0, in_channels // block_size), name="ic")
ii = te.reduce_axis((0, block_size), name="ii")
A = te.placeholder(data_shape, name="A", dtype="float16")
W = te.placeholder(kernel_shape, name="W", dtype="float16")
Apad = te.compute(
(
batch_size // block_size,
height + 2 * pad_h,
width + 2 * pad_w,
in_channels // block_size,
block_size,
block_size,
),
lambda n, h, w, i, nn, ii: tvm.tir.if_then_else(
tvm.tir.all(h >= pad_h, h - pad_h < height, w >= pad_w, w - pad_w < width),
A[n, h - pad_h, w - pad_w, i, nn, ii],
tvm.tir.const(0.0, "float16"),
),
name="Apad",
)
Conv = te.compute(
output_shape,
lambda n, h, w, o, nn, oo: te.sum(
Apad[n, h * stride_h + kh, w * stride_w + kw, ic, nn, ii].astype("float32")
* W[kh, kw, ic, o, ii, oo].astype("float32"),
axis=[ic, kh, kw, ii],
),
name="Conv",
)
s = te.create_schedule(Conv.op)
s[Apad].compute_inline()
AS = s.cache_read(Apad, "shared", [Conv])
WS = s.cache_read(W, "shared", [Conv])
AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
ConvF = s.cache_write(Conv, "wmma.accumulator")
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
nc, hc, wc, oc, nnc, ooc = Conv.op.axis
block_k = s[Conv].fuse(hc, wc)
s[Conv].bind(block_k, block_z)
nc, nci = s[Conv].split(nc, factor=warp_row_tiles)
block_i, nc = s[Conv].split(nc, factor=block_row_warps)
oc, oci = s[Conv].split(oc, factor=warp_col_tiles)
block_j, oc = s[Conv].split(oc, factor=block_col_warps)
s[Conv].reorder(block_k, block_i, block_j, nc, oc, nci, oci, nnc, ooc)
s[Conv].bind(block_i, block_x)
s[Conv].bind(block_j, block_y)
s[Conv].bind(nc, thread_y)
s[Conv].bind(oc, thread_z)
s[ConvF].compute_at(s[Conv], oc)
n, h, w, o, nnf, oof = ConvF.op.axis
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(ko, kh, ki, kw, n, o, nnf, oof, ii)
s[AF].compute_at(s[ConvF], kw)
s[WF].compute_at(s[ConvF], kw)
s[WS].compute_at(s[ConvF], kh)
s[AS].compute_at(s[ConvF], kh)
n, h, w, i, nn, ii = AS.op.axis
tx, xo = s[AS].split(n, nparts=block_row_warps)
ty, yo = s[AS].split(xo, nparts=block_col_warps)
t = s[AS].fuse(nn, ii)
to, ti = s[AS].split(t, factor=warp_size)
s[AS].bind(tx, thread_y)
s[AS].bind(ty, thread_z)
s[AS].bind(ti, thread_x)
kh, kw, ic, o, ii, oo = WS.op.axis
tx, xo = s[WS].split(o, nparts=block_row_warps)
ty, yo = s[WS].split(xo, nparts=block_col_warps)
t = s[WS].fuse(ii, oo)
to, ti = s[WS].split(t, nparts=warp_size)
s[WS].bind(tx, thread_y)
s[WS].bind(ty, thread_z)
s[WS].bind(to, thread_x)
s[WS].vectorize(ti)
s[AF].tensorize(AF.op.axis[-2], intrin_wmma_load_matrix((16, 16, 16), "wmma.matrix_a"))
s[WF].tensorize(WF.op.axis[-2], intrin_wmma_load_matrix((16, 16, 16), "wmma.matrix_b"))
s[Conv].tensorize(nnc, intrin_wmma_store_matrix((16, 16, 16)))
s[ConvF].tensorize(nnf, intrin_wmma_gemm((16, 16, 16)))
func = tvm.build(s, [A, W, Conv], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=data_shape).astype(A.dtype)
w_np = np.random.uniform(size=kernel_shape).astype(W.dtype)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.array(np.zeros(output_shape, dtype=Conv.dtype), dev)
evaluator = func.time_evaluator(func.entry_name, dev, number=3)
print("conv2d with tensor core: %f ms" % (evaluator(a, w, c).mean * 1e3))
if VERIFY:
func(a, w, c)
a_np = a_np.transpose(0, 4, 1, 2, 3, 5).reshape(batch_size, height, width, in_channels)
w_np = w_np.transpose(0, 1, 2, 4, 3, 5).reshape(
kernel_h, kernel_w, in_channels, out_channels
)
c_np = (
c.numpy().transpose((0, 4, 1, 2, 3, 5)).reshape(batch_size, height, width, out_channels)
)
c_std = conv2d_nhwc_python(
a_np.astype(Conv.dtype), w_np.astype(Conv.dtype), (stride_h, stride_w), (pad_h, pad_w)
).astype(Conv.dtype)
np.testing.assert_allclose(c_np, c_std, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
test_tensor_core_batch_matmal()
test_tensor_core_batch_conv()
import tvm
from tvm import te
def intrin_vadd(xo, m, n):
x = te.placeholder((n,), name="vx")
y = te.placeholder((n,), name="vy")
if m % n == 0:
body = lambda i: x[i] + y[i]
else:
body = lambda i: tvm.tir.Select(
xo * n + i < m, x[i] + y[i], tvm.tir.const(0, dtype=x.dtype)
)
z = te.compute(x.shape, body, name="z")
def intrin_func(ins, outs):
xx, yy = ins
zz = outs[0]
return tvm.tir.call_packed("vadd", xx, yy, zz)
buffer_params = {"offset_factor": 16}
return te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params=buffer_params)
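# GEMV intrinsic with an explicit (body, reset, update) triple: "gemv"
# computes the full reduction, "fill_zero" resets the output, and "gemv_add"
# accumulates into it. The no-reset variant further below returns None for
# reset and relies on the body performing initialization itself.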
def intrin_gemv(m, n):
w = te.placeholder((m, n), name="w")
x = te.placeholder((n,), name="x")
k = te.reduce_axis((0, n), name="k")
z = te.compute((m,), lambda i: te.sum(w[i, k] * x[k], axis=k), name="z")
Wb = tvm.tir.decl_buffer(
w.shape, w.dtype, name="W", offset_factor=16, strides=[te.var("ldw"), 1]
)
def intrin_func(ins, outs):
ww, xx = ins
zz = outs[0]
ww_ptr = ww.access_ptr("r")
xx_ptr = xx.access_ptr("r")
zz_ptr = zz.access_ptr("w")
body = tvm.tir.call_packed("gemv", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
reset = tvm.tir.call_packed("fill_zero", zz_ptr, n)
update = tvm.tir.call_packed("gemv_add", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
return body, reset, update
buffer_params = {"offset_factor": 16, "data_alignment": 16}
return te.decl_tensor_intrin(
z.op, intrin_func, binds={w: Wb}, default_buffer_params=buffer_params
)
def intrin_gemv_no_reset(m, n):
w = te.placeholder((m, n), name="w")
x = te.placeholder((n,), name="x")
k = te.reduce_axis((0, n), name="k")
z = te.compute((m,), lambda i: te.sum(w[i, k] * x[k], axis=k), name="z")
Wb = tvm.tir.decl_buffer(
w.shape, w.dtype, name="W", offset_factor=16, strides=[te.var("ldw"), 1]
)
def intrin_func(ins, outs):
ww, xx = ins
zz = outs[0]
ww_ptr = ww.access_ptr("r")
xx_ptr = xx.access_ptr("r")
zz_ptr = zz.access_ptr("w")
body = tvm.tir.call_packed("gemv", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
update = tvm.tir.call_packed("gemv_add", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
return body, None, update
buffer_params = {"offset_factor": 16, "data_alignment": 16}
return te.decl_tensor_intrin(
z.op, intrin_func, binds={w: Wb}, default_buffer_params=buffer_params
)
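# The checks below drive the internal tensorize machinery directly:
# InferTensorizeRegion computes the sub-domain covered by the intrinsic, and
# MatchTensorizeBody verifies that the compute pattern at the tensorized site
# structurally matches the intrinsic's declared computation.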
def test_tensorize_vadd():
def add(m):
x = te.placeholder((m,), name="x")
y = te.placeholder((m,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
return x, y, z
def check(m, factor):
x, y, z = add(m)
s = te.create_schedule(z.op)
xo, xi = s[z].split(z.op.axis[0], factor=factor)
vadd = intrin_vadd(xo, m, factor)
s[z].tensorize(xi, vadd)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[z], dom_map)
assert tvm.ir.structural_equal(out_dom[z.op.axis[0]].extent, factor)
assert tvm.ir.structural_equal(out_dom[z.op.axis[0]].min, xo * factor)
assert tvm.ir.structural_equal(in_dom.items()[0][1][0].extent, factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[z], out_dom, in_dom, vadd)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(vadd.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [x, y, z])
def check_cache_write(m, factor):
x, y, z = add(m)
s = te.create_schedule(z.op)
_, _ = s[z].split(z.op.axis[0], factor=factor)
z_global = s.cache_write(z, "global")
xo, xi = z_global.op.axis
vadd = intrin_vadd(xo, m, factor)
s[z_global].tensorize(xi, vadd)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[z_global], dom_map)
assert tvm.ir.structural_equal(out_dom[xo].extent, 1)
assert isinstance(out_dom[xo].min, tvm.tir.Var)
assert xo.var.name == out_dom[xo].min.name
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[z_global], out_dom, in_dom, vadd)[0]
ana = tvm.arith.Analyzer()
vars = tvm.runtime.convert({xo.var: out_dom[xo].min})
vadd_body = tvm.tir.stmt_functor.substitute(vadd.op.body[0], vars)
assert tvm.ir.structural_equal(ana.simplify(body), ana.simplify(vadd_body))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [x, y, z])
def check_compute_reuse():
x, y, z = add(32)
def _intrin_vadd():
def _intrin_func(ins, outs):
return tvm.tir.call_packed("vadd", ins[0], ins[1], outs[0])
return tvm.te.decl_tensor_intrin(z.op, _intrin_func)
s = tvm.te.create_schedule(z.op)
s[z].tensorize(z.op.axis[0], _intrin_vadd())
tvm.lower(s, [x, y, z])
check(128, 16)
check_cache_write(129, 16)
check_compute_reuse()
def test_tensorize_matmul():
n = 1024
m = n
l = n
A = te.placeholder((n, l), name="A")
B = te.placeholder((m, l), name="B")
k = te.reduce_axis((0, l), name="k")
C = te.compute((n, m), lambda i, j: te.sum(B[j, k] * A[i, k], axis=k), name="C")
def check(factor):
s = te.create_schedule(C.op)
x, y = C.op.axis
yo, yi = s[C].split(y, factor=factor)
gemv = intrin_gemv(factor, l)
s[C].tensorize(yi, gemv)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[C], dom_map)
assert tvm.ir.structural_equal(out_dom[x].extent, 1)
assert tvm.ir.structural_equal(out_dom[y].extent, factor)
assert tvm.ir.structural_equal(out_dom[y].min, yo * factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[C], out_dom, in_dom, gemv)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(gemv.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [A, B, C])
def check_rfactor(factor, rfactor):
s = te.create_schedule(C.op)
x, y = C.op.axis
rk = C.op.reduce_axis[0]
yo, yi = s[C].split(y, factor=factor)
ro, ri = s[C].split(rk, factor=rfactor)
s[C].reorder(yo, ro, yi, ri)
gemv = intrin_gemv(factor, rfactor)
s[C].tensorize(yi, gemv)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[C], dom_map)
assert tvm.ir.structural_equal(out_dom[x].extent, 1)
assert tvm.ir.structural_equal(out_dom[y].extent, factor)
assert tvm.ir.structural_equal(out_dom[y].min, yo * factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[C], out_dom, in_dom, gemv)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(gemv.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [A, B, C])
def check_rfactor_no_reset(factor, rfactor):
s = te.create_schedule(C.op)
x, y = C.op.axis
rk = C.op.reduce_axis[0]
yo, yi = s[C].split(y, factor=factor)
ro, ri = s[C].split(rk, factor=rfactor)
s[C].reorder(yo, ro, yi, ri)
gemv = intrin_gemv_no_reset(factor, rfactor)
s[C].tensorize(yi, gemv)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[C], dom_map)
assert tvm.ir.structural_equal(out_dom[x].extent, 1)
assert tvm.ir.structural_equal(out_dom[y].extent, factor)
assert tvm.ir.structural_equal(out_dom[y].min, yo * factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[C], out_dom, in_dom, gemv)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(gemv.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [A, B, C])
def check_rfactor_no_reset_multi_reduction(factor, rfactor):
s = te.create_schedule(C.op)
x, y = C.op.axis
rk = C.op.reduce_axis[0]
yo, yi = s[C].split(y, factor=factor)
ro, ri = s[C].split(rk, factor=rfactor)
roo, roi = s[C].split(ro, factor=2)
s[C].reorder(yo, roo, roi, yi, ri)
gemv = intrin_gemv_no_reset(factor, rfactor)
s[C].tensorize(yi, gemv)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[C], dom_map)
assert tvm.ir.structural_equal(out_dom[x].extent, 1)
assert tvm.ir.structural_equal(out_dom[y].extent, factor)
assert tvm.ir.structural_equal(out_dom[y].min, yo * factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[C], out_dom, in_dom, gemv)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(gemv.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [A, B, C])
check(16)
check_rfactor(16, 16)
check_rfactor_no_reset(16, 16)
check_rfactor_no_reset_multi_reduction(16, 16)
def test_tensorize_op():
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
def op_intrin():
bh = 9
bw = 9
x = te.placeholder((5, 5), name="A")
y = te.compute((bh, bw), lambda i, j: x[idxd(j, 3) + idxm(i, 3), idxm(j, 3) + idxd(i, 3)])
def intrin_func(ins, outs):
(xx,) = ins
zz = outs[0]
return tvm.tir.call_packed("op", xx, zz)
return te.decl_tensor_intrin(y.op, intrin_func, default_buffer_params={"offset_factor": 2})
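# The intrinsic's compute must use the same index expressions (idxd/idxm) as
# the target compute below; tensorize pattern-matches the two bodies.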
A = te.placeholder((5, 5), name="A")
B = te.compute((9, 9), lambda i, j: A[idxd(j, 3) + idxm(i, 3), idxm(j, 3) + idxd(i, 3)])
bt = op_intrin()
s = te.create_schedule(B.op)
x, y = B.op.axis
s[B].tensorize(x, bt)
s = s.normalize()
tvm.lower(s, [A, B])
def test_tensorize_tensor_compute_op():
def intrin_multivadd(n):
n_a = te.var("n_a")
Ab = tvm.tir.decl_buffer((n,), "float32", strides=[n_a])
n_b = te.var("n_b")
Bb = tvm.tir.decl_buffer((n,), "float32", strides=[n_b])
n_c = te.var("n_c")
Cb = tvm.tir.decl_buffer((n,), "float32", strides=[n_c])
z = te.compute(
(n,),
lambda i: tvm.tir.call_extern(
"float32",
"vadd",
Ab.access_ptr("w", offset=n_a * i),
Bb.access_ptr("r", offset=n_b * i),
Cb.access_ptr("r", offset=n_c * i),
),
)
def intrin_func(ins, outs):
return tvm.tir.call_packed("multivadd")
return te.decl_tensor_intrin(z.op, intrin_func, name="multivadd")
def intrin_vadd(n):
dtype = "float32"
x = te.placeholder((n,), dtype=dtype, name="vx")
y = te.placeholder((n,), dtype=dtype, name="vy")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
s = te.create_schedule(z.op)
def create_buffer(t):
return tvm.tir.decl_buffer(t.shape, t.dtype, name="W" + t.name, offset_factor=16)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
"float32",
"vadd",
ins[0].access_ptr(" |
r"),
ins[1].access_ptr("r"),
outs[0].access_ptr("wr"),
)
)
return ib.get()
return te.decl_tensor_intrin(
z.op, intrin_func, binds={x: create_buffer(x), y: create_buffer(y), z: create_buffer(z)}
)
M = 1024
factor = 16
dtype = "float32"
A = te.placeholder((M // factor, factor), name="A", dtype=dtype)
B = te.placeholder((M // factor, factor), name="B", dtype=dtype)
vadd = intrin_vadd(factor)
C = te.compute((M // factor, factor), lambda i: vadd(A[i, 0:factor], B[i, 0:factor]))
s = te.create_schedule(C.op)
multivadd = intrin_multivadd(64)
s[C].tensorize(C.op.axis[0], multivadd)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
assert isinstance(stmt.body, tvm.tir.For)
assert stmt.body.loop_var.name == C.op.axis[0].var.name
if __name__ == "__main__":
test_tensorize_vadd()
test_tensorize_matmul()
test_tensorize_op()
test_tensorize_tensor_compute_op()
import json
import tvm
from tvm import te
@tvm.te.tag_scope(tag="conv")
def compute_conv(data, weight):
N, IC, H, W = data.shape
OC, IC, KH, KW = weight.shape
OH = H - KH + 1
OW = W - KW + 1
ic = te.reduce_axis((0, IC), name="ic")
dh = te.reduce_axis((0, KH), name="dh")
dw = te.reduce_axis((0, KW), name="dw")
return te.compute(
(N, OC, OH, OW),
lambda i, oc, h, w: te.sum(
data[i, ic, h + dh, w + dw] * weight[oc, ic, dh, dw], axis=[ic, dh, dw]
),
)
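# Ops created inside a tag_scope (either the decorator above or the context
# manager below) carry the given tag in op.tag; nesting scopes is an error,
# which test_nested checks.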
def test_with():
n = te.size_var("n")
m = te.size_var("m")
l = te.size_var("l")
A = te.placeholder((n, l), name="A")
B = te.placeholder((m, l), name="B")
with tvm.te.tag_scope(tag="gemm"):
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m),
lambda i, j: te.sum(A[i, k] * B[j, k], axis=k),
attrs={"hello": 1, "arr": [10, 12]},
)
assert C.op.tag == "gemm"
assert "hello" in C.op.attrs
assert "xx" not in C.op.attrs
assert C.op.attrs["hello"].value == 1
CC = tvm.ir.load_json(tvm.ir.save_json(C))
assert CC.op.attrs["hello"].value == 1
assert CC.op.attrs["arr"][0].value == 10
assert json.loads(str(CC.op.attrs))["arr"][1] == 12
def test_decorator():
n = te.size_var("n")
c = te.size_var("c")
h = te.size_var("h")
w = te.size_var("w")
kh = te.size_var("kh")
kw = te.size_var("kw")
A = te.placeholder((n, c, h, w), name="A")
B = te.placeholder((c, c, kh, kw), name="B")
C = compute_conv(A, B)
assert C.op.tag == "conv"
assert len(C.op.attrs) == 0
def test_nested():
n = te.size_var("n")
c = te.size_var("c")
h = te.size_var("h")
w = te.size_var("w")
kh = te.size_var("kh")
kw = te.size_var("kw")
A = te.placeholder((n, c, h, w), name="A")
B = te.placeholder((c, c, kh, kw), name="B")
try:
with te.tag_scope(tag="conv"):
C = compute_conv(A, B)
assert False
except ValueError:
pass
if __name__ == "__main__":
test_with()
test_decorator()
test_nested()
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.topi.nn.pooling import pool2d
def test_tensor():
m = te.size_var("m")
n = te.size_var("n")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
B = te.placeholder((n, l), name="B")
T = te.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k])
print(T)
print(T.op.body)
assert tuple(T.shape) == (m, n, l)
assert isinstance(A.op, tvm.te.PlaceholderOp)
assert A == A
assert T.op.output(0) == T
assert T.op.output(0).__hash__() == T.__hash__()
d = {T.op.output(0): 1}
assert d[T] == 1
assert T[0][0][0].astype("float16").dtype == "float16"
def test_rank_zero():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
scale = te.placeholder((), name="s")
k = te.reduce_axis((0, m), name="k")
T = te.compute((), lambda: te.sum(A[k] * scale(), axis=k))
print(T)
print(T.op.body)
assert tuple(T.shape) == ()
def test_conv1d():
n = te.size_var("n")
A = te.placeholder((n + 2), name="A")
def computeB(ii):
i = ii + 1
return A[i - 1] + A[i] + A[i + 1]
B = te.compute(n, computeB)
def test_tensor_slice():
n = te.size_var("n")
A = te.compute((n, n), lambda i, j: 1)
B = te.compute((n,), lambda i: A[0][i] + A[0][i])
def test_tensor_reduce_multi_axis():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
k1 = te.reduce_axis((0, n), "k")
k2 = te.reduce_axis((0, m), "k")
C = te.compute((1,), lambda _: te.sum(A[k1, k2], axis=(k1, k2)))
C = te.compute((1,), lambda _: te.sum(A[k1, k2], axis=[k1, k2]))
def test_tensor_comm_reducer():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
k = te.reduce_axis((0, n), "k")
mysum = te.comm_reducer(lambda x, y: x + y, lambda t: tvm.tir.const(0, dtype=t))
C = te.compute((m,), lambda i: mysum(A[i, k], axis=k))
def test_tensor_comm_reducer_overload():
m = te.size_var("m")
n = te.size_var("n")
mysum = te.comm_reducer(lambda x, y: x + y, lambda t: tvm.tir.const(0, dtype=t))
sum_res = mysum(m, n)
def test_tensor_reduce():
m = te.size_var("m")
n = te.size_var("n")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
B = te.placeholder((n, l), name="B")
T = te.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k])
rv = te.reduce_axis((0, A.shape[1]), "k")
C = te.compute((m, n), lambda i, j: te.sum(T(i, j, rv + 1), axis=rv))
C_json = tvm.ir.save_json(C)
C_loaded = tvm.ir.load_json(C_json)
assert isinstance(C_loaded, te.tensor.Tensor)
assert str(C_loaded) == str(C)
def test_tensor_reduce_multiout_with_cond():
def fcombine(x, y):
return x[0] + y[0], x[1] + y[1]
def fidentity(t0, t1):
return tvm.tir.const(0, t0), tvm.tir.const(1, t1)
mysum = te.comm_reducer(fcombine, fidentity, name="mysum")
m = te.var("m")
n = te.var("n")
idx = te.placeholder((m, n), name="idx", dtype="int32")
val = te.placeholder((m, n), name="val", dtype="int32")
k = te.reduce_axis((0, n), "k")
cond = te.floormod(k, 2) == 0
T0, T1 = te.compute((m,), lambda i: mysum((idx[i, k], val[i, k]), axis=k, where=cond), name="T")
def test_tensor_compute1():
m = 1024
factor = 16
dtype = "float32"
def intrin_vadd(n):
x = te.placeholder((n,))
y = te.placeholder((n,))
z = te.compute(x.shape, lambda i: x[i] + y[i])
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
outs[0].dtype,
"vadd",
ins[0].access_ptr("r"),
ins[1].access_ptr("r"),
outs[0].access_ptr("wr"),
)
)
return ib.get()
return te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params={"offset_factor": n})
vadd = intrin_vadd(factor)
A = te.placeholder((m // factor, factor), name="A", dtype=dtype)
B = te.placeholder((m // factor, factor), name="B", dtype=dtype)
C = te.compute((m // factor, factor), lambda i: vadd(A[i, 0:factor], B[i, 0:factor]))
s = te.create_schedule(C.op)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
stmt = tvm.lower(s, [A, B, C])["main"].body
assert isinstance(stmt.body, tvm.tir.Evaluate)
def test_tensor_compute2():
M = 2048
N = 1024
L = 1024
factor = 16
factor1 = 32
factor2 = 32
dtype = "float32"
def intrin_gemm(m, n, l):
k = te.reduce_axis((0, l))
x = te.placeholder((m, l))
y = te.placeholder((n, l))
z = te.compute((m, n), lambda i, j: te.sum(x[i][k] * y[j][k], axis=k))
def intrin_func(ins, outs):
x_ptr = ins[0].access_ptr("r")
y_ptr = ins[1].access_ptr("r")
z_ptr = outs[0].access_ptr("w")
body = tvm.tir.call_packed("gemv", x_ptr, y_ptr, z_ptr, m, n, l)
reset = tvm.tir.call_packed("fill_zero", z_ptr, m, n)
update = tvm.tir.call_packed("gemv_add", x_ptr, y_ptr, z_ptr, m, n, l)
return body, reset, update
return te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params={"offset_factor": n})
vgemm = intrin_gemm(factor1, factor2, factor)
A = te.placeholder((M // factor1, L // factor, factor1, factor), name="A", dtype=dtype)
B = te.placeholder((N // factor2, L // factor, factor2, factor), name="B", dtype=dtype)
k = te.reduce_axis((0, L // factor), name="k")
C = te.compute(
(M // factor1, N // factor2, factor1, factor2),
lambda i, j: vgemm(
A[i, k, 0:factor1, 0:factor], B[j, k, 0:factor2, 0:factor], reduce_axis=k
),
)
s = te.create_schedule(C.op)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
stmt = tvm.lower(s, [A, B, C])["main"].body
assert isinstance(stmt.body.body[0], tvm.tir.Evaluate)
assert isinstance(stmt.body.body[1].body, tvm.tir.Evaluate)
def test_tensor_scan():
m = te.size_var("m")
n = te.size_var("n")
x = te.placeholder((m, n))
s = te.placeholder((m, n))
res = tvm.te.scan(
te.compute((1, n), lambda _, i: x[0, i]),
te.compute((m, n), lambda t, i: s[t - 1, i] + x[t, i]),
s,
)
assert tuple(res.shape) == (m, n)
def test_scan_multi_out():
m = te.size_var("m")
n = te.size_var("n")
x1 = te.placeholder((m, n))
s1 = te.placeholder((m, n))
x2 = te.placeholder((m, n))
s2 = te.placeholder((m, n))
s1_init = te.compute((1, n), lambda _, i: x1[0, i])
s2_init = te.compute((1, n), lambda _, i: x2[0, i])
s1_update = te.compute((m, n), lambda t, i: s1[t - 1, i] + s2[t - 1, i] + x1[t, i])
s2_update = te.compute((m, n), lambda t, i: x2[t, i] + s2[t - 1, i])
r0, r1 = tvm.te.scan([s1_init, s2_init], [s1_update, s2_update], [s1, s2])
assert r0.value_index == 0
assert r1.value_index == 1
json_str = tvm.ir.save_json(r0.op)
zz = tvm.ir.load_json(json_str)
assert isinstance(zz, tvm.te.ScanOp)
def test_extern():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
def extern_func(ins, outs):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
return tvm.tir.call_packed("myadd", ins[0].data, outs[0].data, m)
B = te.extern((m,), [A], extern_func)
assert tuple(B.shape) == (m,)
def test_extern_multi_out():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] * 10)
def extern_func(ins, outs):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
return tvm.tir.call_packed("myadd", ins[0].data, outs[0].data, outs[1].data, m)
res = te.extern([A.shape, A.shape], [A, B], extern_func)
assert len(res) == 2
assert res[1].value_index == 1
def test_tuple_inputs():
m = te.size_var("m")
n = te.size_var("n")
A0 = te.placeholder((m, n), name="A0")
A1 = te.placeholder((m, n), name="A1")
T0, T1 = te.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name="T")
s = te.create_schedule(T0.op)
for i in range(len(T0.shape)):
assert T0.shape[i] == T1.shape[i]
assert T0.op == T1.op
assert T0.value_index == 0
assert T1.value_index == 1
def test_tuple_with_different_deps():
m = te.size_var("m")
n = te.size_var("n")
A0 = te.placeholder((m, n), name="A1")
A1 = te.placeholder((m, n), name="A2")
B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name="B")
C = te.compute((m, n), lambda i, j: B0[i, j] + 4, name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=10)
s[B0.op].compute_at(s[C], xo)
sch = s.normalize()
bounds = tvm.te.schedule.InferBound(sch)
stmt = tvm.te.schedule.ScheduleOps(sch, bounds)
def get_B1_realize(x):
if (
isinstance(x, tvm.tir.ProducerRealize)
and x.producer.op == B1.op
and x.producer.value_index == 1
):
ret.append(x)
ret = []
tvm.tir.stmt_functor.post_order_visit(stmt, get_B1_realize)
assert stmt.producer == C and len(ret) == 1
def test_tensor_inputs():
x = te.placeholder((1,), name="x")
y = te.compute(x.shape, lambda i: x[i] + x[i])
assert tuple(y.op.input_tensors) == (x,)
def test_tensor_pool():
def intrin_pool():
A = te.placeholder((64, 16, 16), name="A")
kh = te.reduce_axis((0, 3), name="kh")
kw = te.reduce_axis((0, 3), name="kw")
P = te.compute(
(64, 14, 14),
lambda c, oh, ow: tvm.te.max(A[c, oh + kh, ow + kw], axis=[kh, kw]),
name="p",
)
def intrin_func(ins, outs):
dinp = ins[0]
dout = outs[0]
return tvm.tir.call_packed("op", dinp, dout)
return te.decl_tensor_intrin(P.op, intrin_func, default_buffer_params={"offset_factor": 1})
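# pool2d produces axes (n, c, oh, ow), so the variable unpacked as `oh` below
# is really the channel axis; tensorizing there replaces the whole
# (64, 14, 14) max-pool sub-computation with the packed "op" call above.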
A = te.placeholder((1, 64, 16, 16), name="A")
P = pool2d(
data=A, kernel=(3, 3), stride=(1, 1), dilation=(1, 1), padding=(0, 0, 0, 0), pool_type="max"
)
s = te.create_schedule(P.op)
_, oh, _, _ = P.op.axis
intrin = intrin_pool()
s[P].tensorize(oh, intrin)
tvm.lower(s, [A, P])
def test_tensor_scalar_mixed():
a = np.array(np.random.uniform(size=(10,)), "float32")
b = np.array(np.random.uniform(size=(1))[0], "float32")
c = np.array(np.random.uniform(size=(10,)), "float32")
@tvm.register_func("tvm.test_tensor_scalar_scale")
def my_scale(tensor, scalar, out):
out_np = tensor.numpy() * scalar.numpy()
tvm.nd.array(out_np).copyto(out)
A = te.placeholder(a.shape, name="A")
B = te.placeholder(b.shape, name="B")
C = te.extern(
a.shape,
[A, B],
lambda ins, outs: tvm.tir.call_packed(
"tvm.test_tensor_scalar_scale", ins[0], ins[1], outs[0]
),
name="C",
)
s = te.create_schedule(C.op)
f = tvm.build(s, [A, B, C], "llvm")
ta = tvm.nd.array(a)
tb = tvm.nd.array(b)
tc = tvm.nd.array(c)
f(ta, tb, tc)
tvm.testing.assert_allclose(a * b, tc.numpy())
def test_tensor_scalar():
a = np.array(np.random.uniform(size=(1))[0], "float32")
b = np.array(0.0, "float32")
@tvm.register_func("tvm.test_tensor_scalar_copy")
def mycopy(x, y):
x.copyto(y)
A = te.placeholder(a.shape, name="A")
B = te.extern(
a.shape,
[A],
lambda ins, outs: tvm.tir.call_packed("tvm.test_tensor_scalar_copy", ins[0], outs[0]),
name="B",
)
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
ta = tvm.nd.array(a)
tb = tvm.nd.array(b)
f(ta, tb)
tvm.testing.assert_allclose(ta.numpy(), tb.numpy())
if __name__ == "__main__":
test_tensor()
test_rank_zero()
test_conv1d()
test_tensor_slice()
test_tensor_reduce_multi_axis()
test_tensor_comm_reducer()
test_tensor_comm_reducer_overload()
test_tensor_reduce()
test_tensor_reduce_multiout_with_cond()
test_tensor_compute1()
test_tensor_compute2()
test_tensor_scan()
test_scan_multi_out()
test_extern()
test_extern_multi_out()
test_tuple_inputs()
test_tuple_with_different_deps()
test_tensor_inputs()
test_tensor_pool()
test_tensor_scalar_mixed()
test_tensor_scalar()
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
import tvm.testing
def test_operator_type_and_tags():
k = 1
n = te.var("n")
A = te.placeholder((), name="A")
B = te.placeholder((10, 5), name="B")
B1 = B[0]
B2 = B[0, 0]
assert isinstance(k + n, tvm.tir.PrimExpr)
assert isinstance(n + n, tvm.tir.PrimExpr)
assert isinstance(k + A, te.tensor.Tensor)
assert isinstance(A + k, te.tensor.Tensor)
assert isinstance(n + A, te.tensor.Tensor)
assert isinstance(A + n, te.tensor.Tensor)
assert isinstance(A + A, te.tensor.Tensor)
assert isinstance(k + B, te.tensor.Tensor)
assert isinstance(B + k, te.tensor.Tensor)
assert isinstance(n + B, te.tensor.Tensor)
assert isinstance(B + n, te.tensor.Tensor)
assert isinstance(A + B, te.tensor.Tensor)
assert isinstance(B + A, te.tensor.Tensor)
assert isinstance(B + B, te.tensor.Tensor)
assert (k + B).op.tag == topi.tag.ELEMWISE
assert (B + k).op.tag == topi.tag.ELEMWISE
assert (n + B).op.tag == topi.tag.ELEMWISE
assert (B + n).op.tag == topi.tag.ELEMWISE
assert (A + B).op.tag == topi.tag.BROADCAST
assert (B + A).op.tag == topi.tag.BROADCAST
assert (B + B).op.tag == topi.tag.BROADCAST
assert isinstance(k + B2, tvm.tir.PrimExpr)
assert isinstance(B2 + k, tvm.tir.PrimExpr)
assert isinstance(n + B2, tvm.tir.PrimExpr)
assert isinstance(B2 + n, tvm.tir.PrimExpr)
assert isinstance(B2 + B2, tvm.tir.PrimExpr)
assert isinstance(B2 + A, te.tensor.Tensor)
assert isinstance(A + B2, te.tensor.Tensor)
assert isinstance(B2 + B, te.tensor.Tensor)
assert isinstance(B + B2, te.tensor.Tensor)
def test_combination():
k = 3
n = 5
m = 10
x = te.var("x")
A = te.placeholder((n, m), name="A")
B = te.placeholder((n, m), name="B")
C = te.placeholder((n, m), name="C")
D = k + A - B * C + x
s = te.create_schedule(D.op)
foo = tvm.build(s, [x, A, B, C, D], "llvm")
dev = tvm.cpu(0)
x = 2
a = tvm.nd.array(np.random.uniform(size=(n, m)).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=(n, m)).astype(B.dtype), dev)
c = tvm.nd.array(np.random.uniform(size=(n, m)).astype(C.dtype), dev)
d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev)
foo(x, a, b, c, d)
tvm.testing.assert_allclose(d.numpy(), k + a.numpy() - b.numpy() * c.numpy() + x)
def verify_tensor_scalar_bop(shape, typ="add"):
"""Verify non-constant Tensor and scalar binary operations."""
sh = [te.size_var("n%d" % i) for i in range(0, len(shape))]
k = te.var("k")
A = te.placeholder(sh, name="A")
if typ == "add":
B = A + k
elif typ == "sub":
B = A - k
elif typ == "mul":
B = A * k
elif typ == "div":
B = A / k
else:
raise NotImplementedError()
def check_device(device):
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
dev = tvm.device(device, 0)
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_elemwise_schedule(device)(B)
k_ = 2
foo = tvm.build(s, [A, B, k] + sh, device, name="tensor_scalar_" + typ)
a_npy = np.random.uniform(size=shape).astype(A.dtype)
if typ == "add":
b_npy = a_npy + k_
elif typ == "sub":
b_npy = a_npy - k_
elif typ == "mul":
b_npy = a_npy * k_
elif typ == "div":
b_npy = a_npy / k_
else:
raise NotImplementedError()
a_nd = tvm.nd.array(a_npy, dev)
b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), dev)
foo(a_nd, b_nd, k_, *shape)
tvm.testing.assert_allclose(b_nd.numpy(), b_npy, rtol=1e-5)
for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]:
check_device(device)
def verify_broadcast_bop(lhs_shape, rhs_shape, typ="add"):
A = te.placeholder(shape=lhs_shape, name="A")
B = te.placeholder(shape=rhs_shape, name="B")
if typ == "add": |
C = A + B
elif typ == "sub":
C = A - B
elif typ == "mul":
C = A * B
elif typ == "div":
C = A / B
else:
raise NotImplementedError()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_broadcast_schedule(device)(C)
foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + typ)
lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype)
rhs_npy = np.random.uniform(size=rhs_shape).astype(A.dtype)
if typ == "add":
out_npy = lhs_npy + rhs_npy
elif typ == "sub":
out_npy = lhs_npy - rhs_npy
elif typ == "mul":
out_npy = lhs_npy * rhs_npy
elif typ == "div":
rhs_npy = np.abs(rhs_npy) + 0.001
out_npy = lhs_npy / rhs_npy
else:
raise NotImplementedError()
lhs_nd = tvm.nd.array(lhs_npy, dev)
rhs_nd = tvm.nd.array(rhs_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev)
for _ in range(1):
foo(lhs_nd, rhs_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4)
for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]:
check_device(device)
@tvm.testing.uses_gpu
def verify_conv2d_scalar_bop(
batch, in_size, in_channel, num_filter, kernel, stride, padding, typ="add"
):
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(device)
k = 10.0
dilation = (1, 1)
with tvm.target.Target(device):
A = te.placeholder((batch, in_channel, in_size, in_size), name="A")
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
B = conv2d_nchw(A, W, stride, padding, dilation, A.dtype)
if typ == "add":
C = B + k
elif typ == "sub":
C = B - k
elif typ == "mul":
C = B * k
elif typ == "div":
C = B / k
else:
raise NotImplementedError()
s = schedule_conv2d_nchw([C])
foo = tvm.build(s, [A, W, B, C], device, name="conv2d_scalar_" + typ)
a_npy = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
w_npy = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
b_npy = tvm.topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding)
c_npy = np.random.uniform(size=get_const_tuple(B.shape)).astype(B.dtype)
if typ == "add":
c_npy = b_npy + k
elif typ == "sub":
c_npy = b_npy - k
elif typ == "mul":
c_npy = b_npy * k
elif typ == "div":
c_npy = b_npy / k
else:
raise NotImplementedError()
a_nd = tvm.nd.array(a_npy, dev)
w_nd = tvm.nd.array(w_npy, dev)
b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), dev)
c_nd = tvm.nd.array(np.empty(c_npy.shape).astype(C.dtype), dev)
foo(a_nd, w_nd, b_nd, c_nd)
tvm.testing.assert_allclose(c_nd.numpy(), c_npy, rtol=1e-4, atol=1e-4)
for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]:
check_device(device)
@tvm.testing.uses_gpu
def test_tensor_scalar_bop():
verify_tensor_scalar_bop((1,), typ="add")
verify_tensor_scalar_bop((3, 5), typ="sub")
verify_tensor_scalar_bop((1, 3, 5), typ="mul")
verify_tensor_scalar_bop((2, 3, 1, 32), typ="div")
@tvm.testing.uses_gpu
def test_broadcast_bop():
verify_broadcast_bop((2, 3), (), typ="add")
verify_broadcast_bop((5, 2, 3), (1,), typ="add")
verify_broadcast_bop((1, 32), (64, 32), typ="sub")
verify_broadcast_bop((5, 64, 128), (2, 5, 64, 1), typ="mul")
verify_broadcast_bop((2, 3, 1, 32), (64, 32), typ="div")
@tvm.testing.uses_gpu
def test_conv2d_scalar_bop():
verify_conv2d_scalar_bop(1, 16, 4, 4, 3, 1, 1, typ="add")
verify_conv2d_scalar_bop(1, 32, 2, 1, 3, 1, 1, typ="sub")
verify_conv2d_scalar_bop(1, 32, 1, 1, 3, 1, 1, typ="mul")
verify_conv2d_scalar_bop(1, 16, 2, 1, 3, 1, 1, typ="div")
if __name__ == "__main__":
test_operator_type_and_tags()
test_combination()
test_tensor_scalar_bop()
test_broadcast_bop()
test_conv2d_scalar_bop()
import tvm
from tvm import te
def test_verify_compute():
n = te.size_var("n")
m = te.size_var("m")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), "k")
k_ = te.reduce_axis((0, m - 1), "k_")
f1 = lambda i: te.sum(A[i, k], axis=k)
f2 = lambda i: A[i, 0] + 1
f3 = lambda i: te.sum(A[i, k], axis=k) + 1
f4 = lambda i: A[i, 0] * (te.sum(A[i, k], axis=k) + 1)
f5 = lambda i: (te.sum(A[i, k], axis=k), A[i, 0] + 1)
f6 = lambda i: (te.sum(A[i, k], axis=k), te.sum(A[i, k_], axis=k_))
try:
B = te.compute((n,), f1, name="B")
except tvm._ffi.base.TVMError as ex:
assert False
try:
B = te.compute((n,), f2, name="B")
except tvm._ffi.base.TVMError as ex:
assert False
try:
B = te.compute((n,), f3, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
try:
B = te.compute((n,), f4, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
try:
B0, B1 = te.compute((n,), f5, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
try:
B0, B1 = te.compute((n,), f6, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
if __name__ == "__main__":
test_verify_compute()
import numpy as np
import tvm
from tvm import te
import tvm.testing
def test_check_numerical_grads():
functions = [
lambda x: (x * x * x, 3 * x * x),
lambda x: (x * x, 2 * x),
lambda x: (np.abs(x), np.sign(x)),
lambda x: (np.log(np.abs(x)), 1 / x),
lambda x: (np.sqrt(np.abs(x)), np.sign(x) / (2 * np.sqrt(np.abs(x)))),
lambda x: (1 / x, -1 / (x * x)),
lambda x: (np.sign(np.sin(1 / x)), np.zeros_like(x)),
lambda x: (x * np.sin(1 / x), np.sin(1 / x) - np.cos(1 / x) / x),
lambda x: (np.sin(1 / x), -np.cos(1 / x) / (x * x)),
lambda x: (np.tan(x), 1.0 / (np.cos(x) * np.cos(x))),
]
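# Each entry pairs a forward function with its analytic derivative;
# check_numerical_grads compares the provided gradients against numerical
# (finite-difference) estimates of the forward function.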
np.random.seed(0)
min_x = 0.5
for func in functions:
x_input = np.random.uniform(min_x, 10, size=(3, 4))
func_forw = lambda x: np.sum(func(x)[0])
grads = [func(x_input)[1]]
tvm.testing.check_numerical_grads(func_forw, [x_input], grads)
for f1 in functions:
for f2 in functions:
x_input = np.random.uniform(min_x, 10, size=(3, 4))
y_input = np.random.uniform(min_x, 10, size=(3, 4))
func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
grads = [f1(x_input)[1], f2(y_input)[1]]
tvm.testing.check_numerical_grads(func_forw, [x_input, y_input], grads)
func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
grads = {"x": f1(x_input)[1], "y": f2(y_input)[1]}
tvm.testing.check_numerical_grads(func_forw, {"x": x_input, "y": y_input}, grads)
def _noise1(x, atol=1e-2, rtol=0.1):
sqrt_n = np.sqrt(float(np.prod(x.shape)))
tol = 2 * (np.linalg.norm(x) * rtol + atol * sqrt_n)
noise = np.random.normal(size=x.shape)
noise = tol * noise / np.linalg.norm(noise)
return x + noise
def _noise2(x, atol=1e-2, rtol=0.1):
sqrt_n = np.sqrt(float(np.prod(x.shape)))
tol = 2 * (np.linalg.norm(x) * rtol + atol * sqrt_n)
n = np.random.randint(np.prod(x.shape))
noise = np.zeros_like(x)
noise.reshape(-1)[n] = tol
return x + noise
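# Both noise helpers perturb a gradient by roughly twice the allowed
# tolerance (_noise1 spreads it over the whole array, _noise2 concentrates it
# in a single element), so check_numerical_grads must reject the result.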
for f1 in functions:
for f2 in functions:
x_input = np.random.uniform(min_x, 10, size=(3, 4))
y_input = np.random.uniform(min_x, 10, size=(3, 4))
func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
grads = [_noise1(f1(x_input)[1]), _noise1(f2(y_input)[1])]
try:
tvm.testing.check_numerical_grads(func_forw, [x_input, y_input], grads)
except AssertionError as e:
pass
else:
raise AssertionError("tvm.testing.check_numerical_grads didn't raise an exception")
func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
grads = {"x": _noise2(f1(x_input)[1]), "y": _noise2(f2(y_input)[1])}
try:
tvm.testing.check_numerical_grads(func_forw, {"x": x_input, "y": y_input}, grads)
except AssertionError as e:
pass
else:
raise AssertionError("tvm.testing.check_numerical_grads didn't raise an exception")
if __name__ == "__main__":
test_check_numerical_grads()
import pytest
import tvm
from tvm import tir
from tvm.script import tir as T
@T.prim_func
def primfunc_global_allocates(placeholder_144: T.handle, placeholder_145: T.handle, placeholder_146: T.handle, T_cast_48: T.handle) -> None:
T.func_attr({"global_symbol": "fused_nn_conv2d_add_cast_fixed_point_multiply_clip_cast_cast_13", "tir.noalias": True})
placeholder_147 = T.match_buffer(placeholder_144, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_148 = T.match_buffer(placeholder_145, [4608], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_149 = T.match_buffer(placeholder_146, [512], dtype="int32", elem_offset=0, align=64, offset_factor=1)
T_cast_49 = T.match_buffer(T_cast_48, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
PaddedInput_22 = T.decl_buffer([131072], "int16")
DepthwiseConv2d_9 = T.decl_buffer([100352], "int32")
for i1_29, i2_39, i3_40 in T.grid(16, 16, 512):
PaddedInput_22[(((i1_29*8192) + (i2_39*512)) + i3_40)] = T.if_then_else(((((1 <= i1_29) and (i1_29 < 15)) and (1 <= i2_39)) and (i2_39 < 15)), placeholder_147[((((i1_29*7168) + (i2_39*512)) + i3_40) - 7680)], T.int16(0), dtype="int16")
for i_9, j_9, c_9 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] = 0
for di_9, dj_9 in T.grid(3, 3):
DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] = (DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] + (PaddedInput_22[(((((i_9*8192) + (di_9*8192)) + (j_9*512)) + (dj_9*512)) + c_9)].astype("int32")*placeholder_148[(((di_9*1536) + (dj_9*512)) + c_9)].astype("int32")))
for ax1_27, ax2_28, ax3_30 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((ax1_27*7168) + (ax2_28*512)) + ax3_30)] = (DepthwiseConv2d_9[(((ax1_27*7168) + (ax2_28*512)) + ax3_30)] + placeholder_149[ax3_30])
for i1_30, i2_40, i3_41 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((i1_30*7168) + (i2_40*512)) + i3_41)] = T.q_multiply_shift(DepthwiseConv2d_9[(((i1_30*7168) + (i2_40*512)) + i3_41)], 1269068532, 31, -4, dtype="int32")
for i1_31, i2_41, i3_42 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((i1_31*7168) + (i2_41*512)) + i3_42)] = T.max(T.max(DepthwiseConv2d_9[(((i1_31*7168) + (i2_41*512)) + i3_42)], 255), 0)
for ax1_28, ax2_29, ax3_31 in T.grid(14, 14, 512):
PaddedInput_22[(((ax1_28*7168) + (ax2_29*512)) + ax3_31)] = DepthwiseConv2d_9[(((ax1_28*7168) + (ax2_29*512)) + ax3_31)].astype("uint8")
for ax1_29, ax2_30, ax3_32 in T.grid(14, 14, 512):
T_cast_49[(((ax1_29*7168) + (ax2_30*512)) + ax3_32)] = PaddedInput_22[(((ax1_29*7168) + (ax2_30*512)) + ax3_32)].astype("int16")
@T.prim_func
def primfunc_local_allocates(placeholder_162: T.handle, placeholder_163: T.handle, placeholder_164: T.handle, T_cast_76: T.handle) -> None:
T.func_attr({"global_symbol": "fused_nn_conv2d_add_cast_fixed_point_multiply_clip_cast_cast_9", "tir.noalias": True})
placeholder_165 = T.match_buffer(placeholder_162, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_166 = T.match_buffer(placeholder_163, [4608], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_167 = T.match_buffer(placeholder_164, [512], dtype="int32", elem_offset=0, align=64, offset_factor=1)
T_cast_77 = T.match_buffer(T_cast_76, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
sid_21 = T.allocate_const([0,1,2,3,4,5,6,7], "int8", [8])
PaddedInput_25 = T.decl_buffer([131072], "int16")
for i1_35, i2_46, i3_47 in T.grid(16, 16, 512):
PaddedInput_25[(((i1_35*8192) + (i2_46*512)) + i3_47)] = T.if_then_else(((((1 <= i1_35) and (i1_35 < 15)) and (1 <= i2_46)) and (i2_46 < 15)), placeholder_165[((((i1_35*7168) + (i2_46*512)) + i3_47) - 7680)], T.int16(0), dtype="int16")
T_add_11 = T.decl_buffer([100352], "int32")
with T.decl_buffer([100352], "int32") as DepthwiseConv2d_11:
for i_11, j_11, c_11 in T.grid(14, 14, 512):
DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] = 0
for di_11, dj_11 in T.grid(3, 3):
DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] = (DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] + (PaddedInput_25[(((((i_11*8192) + (di_11*8192)) + (j_11*512)) + (dj_11*512)) + c_11)].astype("int32")*placeholder_166[(((di_11*1536) + (dj_11*512)) + c_11)].astype("int32")))
for ax1_44, ax2_45, ax3_47 in T.grid(14, 14, 512):
T_add_11[(((ax1_44*7168) + (ax2_45*512)) + ax3_47)] = (DepthwiseConv2d_11[(((ax1_44*7168) + (ax2_45*512)) + ax3_47)] + placeholder_167[ax3_47])
compute_22 = T.decl_buffer([100352], "int32")
with T.decl_buffer([100352], "int32") as T_cast_78:
for ax1_45, ax2_46, ax3_48 in T.grid(14, 14, 512):
T_cast_78[(((ax1_45*7168) + (ax2_46*512)) + ax3_48)] = T_add_11[(((ax1_45*7168) + (ax2_46*512)) + ax3_48)]
for i1_36, i2_47, i3_48 in T.grid(14, 14, 512):
compute_22[(((i1_36*7168) + (i2_47*512)) + i3_48)] = T.q_multiply_shift(T_cast_78[(((i1_36*7168) + (i2_47*512)) + i3_48)], 1948805937, 31, -5, dtype="int32")
T_cast_79 = T.decl_buffer([100352], "uint8")
with T.decl_buffer([100352], "int32") as compute_23:
for i1_37, i2_48, i3_49 in T.grid(14, 14, 512):
compute_23[(((i1_37*7168) + (i2_48*512)) + i3_49)] = T.max(T.max(compute_22[(((i1_37*7168) + (i2_48*512)) + i3_49)], 255), 0)
for ax1_46, ax2_47, ax3_49 in T.grid(14, 14, 512):
T_cast_79[(((ax1_46*7168) + (ax2_47*512)) + ax3_49)] = compute_23[(((ax1_46*7168) + (ax2_47*512)) + ax3_49)].astype("uint8")
for ax1_47, ax2_48, ax3_50 in T.grid(14, 14, 512):
T_cast_77[(((ax1_47*7168) + (ax2_48*512)) + ax3_50)] = T_cast_79[(((ax1_47*7168) + (ax2_48*512)) + ax3_50)].astype("int16")
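# calculate_workspace_bytes sums the function's intermediate allocations,
# rounding each one up to the requested byte alignment; for the global case
# that is 131072 int16 + 100352 int32 = 663552 bytes at alignment 1.
# calculate_constant_bytes does the same for T.allocate_const data.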
@pytest.mark.parametrize("alignment,size,consts", [(1, 663552, 0), (10, 663560, 0)])
def test_global_allocates(alignment, size, consts):
primfunc = primfunc_global_allocates
assert tvm.tir.analysis.calculate_constant_bytes(primfunc, alignment) == consts
assert tvm.tir.analysis.calculate_workspace_bytes(primfunc, alignment) == size
@pytest.mark.parametrize("alignment,size,consts", [(1, 1566720, 8), (100, 1567100, 100)])
def test_local_allocates(alignment, size, consts):
primfunc = primfunc_local_allocates
assert tvm.tir.analysis.calculate_constant_bytes(primfunc, alignment) == consts
assert tvm.tir.analysis.calculate_workspace_bytes(primfunc, alignment) == size
if __name__ == "__main__":
test_global_allocates()
test_local_allocates()
import tvm
from tvm import tir
from tvm.script import tir as T
@T.prim_func
def buffer_load_store_func(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.match_buffer(b, (128, 128), "float32")
C = T.alloc_buffer((128, 128), "float32")
D = T.alloc_buffer((128, 128), "float32")
for ii, jj in T.grid(128, 128):
with T.block():
i, j = T.axis.remap("SS", [ii, jj])
A[i, j] = T.float32(0)
for i0, j0, k0 in T.grid(32, 32, 32):
with T.block():
i, j, k = T.axis.remap("SSR", [i0, j0, k0])
with T.init():
for ii, jj in T.grid(4, 4):
B[i * 4 + ii, j * 4 + jj] = A[i * 4 + ii, j * 4 + jj]
for ii, jj in T.grid(4, 4):
for kk in range(0, 4):
B[i * 4 + ii, j * 4 + jj] += C[i * 4 + ii, k * 4 + kk]
for kk in range(0, 4):
B[i * 4 + ii, j * 4 + jj] += (
D[j * 4 + jj, k * 4 + kk] * C[i * 4 + ii, k * 4 + kk]
)
@T.prim_func
def buffer_opaque_access(b: T.handle, c: T.handle) -> None:
B = T.match_buffer(b, [16, 16], "float32")
C = T.match_buffer(c, [16, 16], "float32")
with T.block():
T.reads([])
T.writes(B[0:16, 0:16])
A = T.decl_buffer([256], "float32")
for i, j in T.grid(16, 16):
A[i * 16 + j] = 1
for i in range(0, 16):
for j in range(0, 16):
T.evaluate(A[i * 16 + j])
for j in range(0, 16):
T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, T.float32(0), dtype="handle"))
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj]
@T.prim_func
def lca_is_func_root(a: T.handle) -> None:
A = T.match_buffer(a, [0, 0], "float32")
A[0, 0] = 1.0
@T.prim_func
def match_buffer_func(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.match_buffer(b, (128, 128), "float32")
for i, j in T.grid(8, 8):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi * 16 + 2 : vi * 16 + 12, vj * 16 + 2 : vj * 16 + 16])
T.writes(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
B0 = T.match_buffer(B[vi * 16 + 2 : vi * 16 + 6, vj * 16 + 2 : vj * 16 + 6], (4, 4))
B1 = T.match_buffer(B[vi * 16 + 8 : vi * 16 + 12, vj * 16 + 8 : vj * 16 + 16], (4, 8))
for ii, jj in T.grid(16, 16):
with T.block("AAA"):
vii, vjj = T.axis.remap("SS", [ii, jj])
AA = T.match_buffer(A[vii, vjj], ())
AA[()] = 1.0
T.evaluate(B0.data)
T.evaluate(B1.data)
@T.prim_func
def global_buffer_with_blockidx(
a: T.Buffer[(1, 32), "int32"], b: T.Buffer[(1, 32), "int32"]
) -> None:
for i0 in T.thread_binding(0, 1, thread="blockIdx.x"):
for i1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("copy"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads(a[i, j])
T.writes(b[i, j])
b[i, j] = a[i, j]
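# detect_buffer_access_lca maps every buffer to the lowest common ancestor
# (a block or loop) of all of its accesses; None means the LCA is the
# function root itself, as in lca_is_func_root above.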
def test_buffer_load_store():
func = buffer_load_store_func
A, B = [func.buffer_map[x] for x in func.params]
C, D = func.body.block.alloc_buffers
lca = tir.analysis.detect_buffer_access_lca(func)
root_block = func.body.block
assert lca[A] == func.body.block
reduce_block = root_block.body[1].body.body.body.block
assert lca[B] == reduce_block
loop_jj = reduce_block.body.body
assert lca[C] == loop_jj
loop_kk = loop_jj.body[1]
assert lca[D] == loop_kk
def test_opaque_access():
func = buffer_opaque_access
B, C = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
root_block = func.body.block
assert lca[B] == func.body.block
assert lca[C] == root_block.body[1].body.body.block
def test_lca_func_root():
func = lca_is_func_root
(A,) = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
assert lca[A] is None
def test_match_buffer():
func = match_buffer_func
A, B = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
root_block = func.body.block
block = root_block.body.body.body.block
block_inner = block.body[0].body.body.block
assert lca[A] == block_inner
assert lca[B] == block
def test_global_buffer_with_blockidx():
func = global_buffer_with_blockidx
A, B = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
root_block = func.body.block
blockidx_loop = root_block.body
assert lca[A] == blockidx_loop
assert lca[B] == blockidx_loop
if __name__ == "__main__":
test_buffer_load_store()
test_opaque_access()
test_lca_func_root()
test_match_buffer()
test_global_buffer_with_blockidx()
import sys
import pytest
import tvm.testing
from tvm.ir import IRModule
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.tir.analysis import estimate_tir_flops
@pytest.mark.parametrize(
"workload, flops",
[
("C1D", 6291456),
("C2D", 236027904),
("C3D", 13217562624),
("CAP", 75497472),
("DEP", 7225344),
("DIL", 223552896),
("GMM", 4194304),
("GRP", 28901376),
("T2D", 268435456),
("CBR", 239239168),
("TBG", 25165824),
("NRM", 131072),
("SFM", 262144),
],
)
def test_te_workload(workload, flops):
te_workload = create_te_workload(workload, 0)
mod = IRModule({"main": te_workload})
assert float(flops) == estimate_tir_flops(mod)
@T.prim_func
def flops_with_let(a: T.Buffer[16, "float32"]):
for i in range(8):
j = i + 8
a[j] = a[i]
def test_flops_with_let():
flops = estimate_tir_flops(IRModule({"main": flops_with_let}))
assert flops == 8
@T.prim_func
def flops_with_if(a: T.Buffer[16, "float32"], b: T.Buffer[16, "float32"]):
for i in range(16):
if i % 2 == 0:
a[i] = b[i]
else:
if i % 3 == 0:
a[i] = b[i - 1] + b[i - 2]
def test_flops_with_if():
flops = estimate_tir_flops(IRModule({"main": flops_with_if}))
assert flops == 16
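# Illustrative sketch (ours, not part of the original suite): the estimator
# can also be pointed at a freshly built TE workload. A dense n x n x n
# matmul does one multiply and one add per reduction step, i.e. 2 * n**3
# flops, consistent with the GMM entry above (2 * 128**3 == 4194304).
def test_flops_of_te_matmul():
    from tvm import te  # local import; nothing else in this file needs te

    n = 128
    A = te.placeholder((n, n), name="A")
    B = te.placeholder((n, n), name="B")
    k = te.reduce_axis((0, n), name="k")
    C = te.compute((n, n), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
    mod = IRModule({"main": te.create_prim_func([A, B, C])})
    assert estimate_tir_flops(mod) == 2 * n**3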
if __name__ == "__main__":
tvm.testing.main() |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_equal_expr():
x = te.var("x")
y = te.var("y")
def func1():
return x + y + 1
def func2():
return te.exp(tvm.tir.truncdiv((x + y + 1) * y, 4))
assert tvm.tir.analysis.expr_deep_equal(func1(), func1())
assert tvm.tir.analysis.expr_deep_equal(func2(), func2())
assert not tvm.tir.analysis.expr_deep_equal(func2(), func1())
if __name__ == "__main__":
test_equal_expr()
import pytest
import tvm
from tvm import tir
from tvm.script import tir as T
from tvm.ir import Range
@T.prim_func
def func() -> None:
A = T.alloc_buffer((128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.alloc_buffer((128, 128), "float32")
D = T.alloc_buffer((128, 128), "float32")
with T.block():
T.reads([B[0, 0], C[0:16, 0:16], A[4:12, 4:12]])
T.writes([A[0:12, 0:12]])
for i, j in T.grid(8, 8):
A[i, j] = B[0, 0] + C[0, 0]
for i, j in T.grid(2, 2):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
T.reads([A[vi * 4 + 4 : vi * 4 + 8, vj * 4 + 4 : vj * 4 + 8], C[12:16, 12:16]])
T.writes([A[vi * 4 + 4 : vi * 4 + 8, vj * 4 + 4 : vj * 4 + 8]])
for i, j in T.grid(4, 4):
A[vi * 4 + 4 + i, vj * 4 + 4 + j] += C[i + 12, j + 12]
T.evaluate(D.data)
@T.prim_func
def match_buffer_func() -> None:
with T.block("root"):
A = T.alloc_buffer((128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
T.reads([])
T.writes([])
for i, j in T.grid(8, 8):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi * 16 + 2 : vi * 16 + 12, vj * 16 + 2 : vj * 16 + 16])
T.writes(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
AA = T.match_buffer(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16], (16, 16))
B0 = T.match_buffer(B[vi * 16 + 2 : vi * 16 + 6, vj * 16 + 2 : vj * 16 + 6], (4, 4))
B1 = T.match_buffer(
B[vi * 16 + 8 : vi * 16 + 12, vj * 16 + 8 : vj * 16 + 16], (4, 8)
)
for ii, jj in T.grid(16, 16):
with T.block("AAA"):
vii, vjj = T.axis.remap("SS", [ii, jj])
T.reads([])
T.writes(AA[vii, vjj])
AAA = T.match_buffer(AA[vii, vjj], ())
                        AAA[()] = 1.0
T.evaluate(B0.data)
T.evaluate(B1.data)
@T.prim_func
def opaque_block_func() -> None:
with T.block("root"):
A = T.alloc_buffer((16, 16), "float32")
B = T.alloc_buffer((16, 16), "float32")
T.reads([])
T.writes([])
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes([B[i, 0:16]])
for j in range(0, 16):
with T.block():
T.reads(A[i, j])
T.writes(B[i, j])
B[i, j] = A[i, j] + 1.0
@T.prim_func
def opaque_access_func() -> None:
A = T.alloc_buffer([1024])
B = T.alloc_buffer([1024])
for i in T.serial(0, 8):
with T.block():
v = T.axis.S(8, i)
T.reads([A[v * 128 : v * 128 + 128]])
T.writes([B[v * 128 : v * 128 + 128]])
T.evaluate(
T.call_extern("test", B.data, v * 128, 128, A.data, v * 128, 128, dtype="float32")
)
@T.prim_func
def opaque_access_with_tvm_access_ptr_func() -> None:
A = T.alloc_buffer([1024])
B = T.alloc_buffer([1024])
C = T.alloc_buffer([1024])
with T.block("opaque"):
T.reads(A[0:1024], C[0:1024])
T.writes(B[0:1024], C[0:1024])
T.evaluate(A.access_ptr("r"))
T.evaluate(B.access_ptr("w"))
T.evaluate(C.access_ptr("rw"))
@T.prim_func
def access_in_if_then_else_func() -> None:
A = T.alloc_buffer([8])
B = T.alloc_buffer([8])
with T.block():
T.reads([A[0:5]])
T.writes([B[0:8]])
for i in T.serial(0, 8):
B[i] = T.if_then_else(i < 5, A[i], 0.0, dtype="float32")
@T.prim_func
def access_in_branch_func() -> None:
A = T.alloc_buffer([8])
B = T.alloc_buffer([8])
with T.block():
T.reads([A[0:7]])
T.writes([B[0:8]])
for i in T.serial(0, 8):
if i < 5:
B[i] = A[i] + 1.0
else:
                B[i] = A[i - 1]
@T.prim_func
def gemm() -> None:
A = T.alloc_buffer([16, 16], "float32")
B = T.alloc_buffer([16, 16], "float32")
C = T.alloc_buffer([16, 16], "float32")
for i, j, k, ii, jj in T.grid(4, 4, 16, 4, 4):
with T.block("update"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
vk = T.axis.R(16, k)
T.reads(A[vi, vk], B[vj, vk])
T.writes(C[vi, vj])
with T.init():
C[vi, vj] = 0
C[vi, vj] += A[vi, vk] * B[vj, vk]
@T.prim_func
def decomposed_gemm() -> None:
A = T.alloc_buffer([16, 16], "float32")
B = T.alloc_buffer([16, 16], "float32")
C = T.alloc_buffer([16, 16], "float32")
for i, j in T.grid(4, 4):
for ii, jj in T.grid(4, 4):
with T.block("init"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
T.reads([])
T.writes(C[vi, vj])
C[vi, vj] = 0
for k, ii, jj in T.grid(16, 4, 4):
with T.block("update"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
vk = T.axis.R(16, k)
T.reads(C[vi, vj], A[vi, vk], B[vj, vk])
T.writes(C[vi, vj])
C[vi, vj] += A[vi, vk] * B[vj, vk]
@T.prim_func
def access_of_padding_pattern() -> None:
X = T.alloc_buffer([28, 28])
X_pad = T.alloc_buffer([32, 32])
Y = T.alloc_buffer([28, 28])
for i, j in T.grid(32, 32):
with T.block("padding"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([X[vi - 2, vj - 2]])
T.writes([X_pad[vi, vj]])
X_pad[vi, vj] = T.if_then_else(
2 <= vi and vi < 30 and 2 <= vj and vj < 30, X[vi - 2, vj - 2], 0.0, dtype="float32"
)
with T.block("padding_reverse"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([X_pad[vi, vj]])
            T.writes([Y[vi - 2, vj - 2]])
if 2 <= vi and vi < 30 and 2 <= vj and vj < 30:
Y[vi - 2, vj - 2] = X_pad[vi, vj]
def test_block_access_region_detector():
block = func.body.block.body.block
alloc_buffers = func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.reads, ret[0])
tvm.ir.assert_structural_equal(block.writes, ret[1])
D = alloc_buffers[-1]
tvm.ir.assert_structural_equal(
[tvm.tir.BufferRegion(D, [Range(0, 128), Range(0, 128)])], ret[2]
)
def test_opaque_block():
alloc_buffers = opaque_block_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
block0 = opaque_block_func.body.block.body.body.block
ret = tir.analysis.get_block_access_region(block0, buffer_var_map)
tvm.ir.assert_structural_equal(block0.reads, ret[0])
tvm.ir.assert_structural_equal(block0.writes, ret[1])
block1 = block0.body.body.block
ret = tir.analysis.get_block_access_region(block1, buffer_var_map)
tvm.ir.assert_structural_equal(block1.reads, ret[0])
tvm.ir.assert_structural_equal(block1.writes, ret[1])
def test_opaque_access():
block = opaque_access_func.body.block.body.body.block
alloc_buffers = opaque_access_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret0 = tir.analysis.get_block_read_write_region(block, buffer_var_map)
ret1 = tir.analysis.get_block_access_region(block, buffer_var_map)
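    # The two results intentionally differ here: get_block_access_region keeps
    # opaque (call_extern) accesses in a separate third list, whereas
    # get_block_read_write_region folds them into both reads and writes.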
with pytest.raises(ValueError):
tvm.ir.assert_structural_equal(ret0[0], ret1[0])
with pytest.raises(ValueError):
tvm.ir.assert_structural_equal(ret0[1], ret1[1])
def test_opaque_access_with_tvm_access_ptr():
block = opaque_access_with_tvm_access_ptr_func.body.block.body.block
alloc_buffers = opaque_access_with_tvm_access_ptr_func.body.block.alloc_buffers
    buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret0 = tir.analysis.get_block_read_write_region(block, buffer_var_map)
ret1 = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.reads, ret0[0])
tvm.ir.assert_structural_equal(block.writes, ret0[1])
with pytest.raises(ValueError):
tvm.ir.assert_structural_equal(ret0[0], ret1[0])
with pytest.raises(ValueError):
tvm.ir.assert_structural_equal(ret0[1], ret1[1])
def test_match_buffer():
root_block = match_buffer_func.body.block
block = root_block.body.body.body.block
block_inner = block.body[0].body.body.block
alloc_buffers = match_buffer_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.writes, ret[1])
tvm.ir.assert_structural_equal(block.reads, ret[2])
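    # Without first registering block.match_buffers, AA's backing variable is
    # unknown to the analysis, so block_inner reports no write regions at all.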
ret = tir.analysis.get_block_access_region(block_inner, buffer_var_map)
tvm.ir.assert_structural_equal([], ret[1])
for match_buffer in block.match_buffers:
target_buffer = match_buffer.buffer
buffer_var_map[target_buffer.data] = target_buffer
ret = tir.analysis.get_block_access_region(block_inner, buffer_var_map)
tvm.ir.assert_structural_equal(block_inner.reads, ret[0])
tvm.ir.assert_structural_equal(block_inner.writes, ret[1])
def test_access_in_if_then_else_func():
block = access_in_if_then_else_func.body.block.body.block
alloc_buffers = access_in_if_then_else_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret0 = tir.analysis.get_block_read_write_region(block, buffer_var_map)
ret1 = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(ret0[0], ret1[0])
tvm.ir.assert_structural_equal(ret0[1], ret1[1])
def test_access_in_branch_func():
block = access_in_branch_func.body.block.body.block
    alloc_buffers = access_in_branch_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret0 = tir.analysis.get_block_read_write_region(block, buffer_var_map)
ret1 = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(ret0[0], ret1[0])
tvm.ir.assert_structural_equal(ret0[1], ret1[1])
def test_access_of_padding_pattern():
s = tvm.tir.schedule.Schedule(access_of_padding_pattern)
alloc_buffers = s.get_sref(s.get_block("root")).stmt.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
def do_compare_buffer_region(region, expect):
assert region.buffer == expect.buffer
analyzer = tvm.arith.Analyzer()
for observed_range, expected_range in zip(region.region, expect.region):
            assert analyzer.can_prove_equal(observed_range.min, expected_range.min)
            assert analyzer.can_prove_equal(observed_range.extent, expected_range.extent)
def do_check_block(block_name):
block = s.get_sref(s.get_block(block_name)).stmt
expect_reads = block.reads
expect_writes = block.writes
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
for i, read in enumerate(ret[0]):
do_compare_buffer_region(read, expect_reads[i])
for i, write in enumerate(ret[1]):
do_compare_buffer_region(write, expect_writes[i])
do_check_block("padding")
do_check_block("padding_reverse")
def test_access_of_reduction():
block = gemm.body.block.body.body.body.body.body.body.block
alloc_buffers = gemm.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.reads, ret[0])
tvm.ir.assert_structural_equal(block.writes, ret[1])
def test_access_of_decompose_reduction():
init = decomposed_gemm.body.block.body.body.body[0].body.body.block
    update = decomposed_gemm.body.block.body.body.body[1].body.body.body.block
alloc_buffers = decomposed_gemm.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
for block in [init, update]:
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.reads, ret[0])
tvm.ir.assert_structural_equal(block.writes, ret[1])
if __name__ == "__main__":
test_block_access_region_detector()
test_opaque_block()
test_opaque_access()
test_opaque_access_with_tvm_access_ptr()
test_match_buffer()
test_access_in_if_then_else_func()
test_access_in_branch_func()
test_access_of_padding_pattern()
test_access_of_reduction()
    test_access_of_decompose_reduction()
import pytest
import tvm
import tvm.testing
from tvm.script import tir as T
@T.prim_func
def bad_load(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
B[0, 0] = A[2, 2]
@T.prim_func
def bad_load_loop(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
for i in range(3):
B[i, 0] = A[i, 2]
@T.prim_func
def bad_store(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
B[0, 3] = A[1, 2]
@T.prim_func
def bad_store_loop(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
for i in range(3):
B[0, i] = A[1, i]
@T.prim_func
def unknown_bounds(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
N = T.var("int32")
for i in range(3):
B[0, N] = A[1, i]
def test_oob_load():
with pytest.raises(tvm.tir.ScheduleError) as err:
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_load))
assert "buffer A" in err.value.args[0]
with pytest.raises(tvm.tir.ScheduleError) as err:
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_load_loop))
assert "buffer A" in err.value.args[0]
def test_oob_store():
with pytest.raises(tvm.tir.ScheduleError) as err:
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_store))
assert "buffer B" in err.value.args[0]
with pytest.raises(tvm.tir.ScheduleError) as err:
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_store_loop))
assert "buffer B" in err.value.args[0]
def test_unknown_bounds():
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(unknown_bounds))
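# Hedged companion sketch (ours; good_load and test_in_bounds_load are made-up
# names): the checker should stay silent when every access is in bounds, so a
# well-formed copy doubles as a positive sanity check.
@T.prim_func
def good_load(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
    for i in range(2):
        B[i, 0] = A[i, 2]

def test_in_bounds_load():
    # No ScheduleError expected for fully in-bounds accesses.
    tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(good_load))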
if __name__ == "__main__":
    tvm.testing.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te, topi
from tvm.meta_schedule.testing.te_workload import conv2d_winograd_nhwc, matmul
from tvm.tir.analysis import find_anchor_block
def test_matmul_add():
n = m = k = 128
A, B, C = matmul(n, m, k)
mod = tvm.IRModule()
mod["main"] = te.create_prim_func([A, B, C + A])
block = find_anchor_block(mod)
assert block.name_hint == "C"
def test_winograd():
mod = tvm.IRModule()
mod["main"] = te.create_prim_func(conv2d_winograd_nhwc(1, 14, 14, 128, 128, 6))
block = find_anchor_block(mod)
assert block.name_hint == "bgemm"
def test_no_anchor_block():
inp = te.placeholder((10,), name="input")
out = topi.nn.relu(inp + 1.0)
mod = tvm.IRModule()
mod["main"] = te.create_prim_func([inp, out])
assert find_anchor_block(mod) is None
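# Hedged extra sketch (ours; test_matmul_alone is a made-up name): when the
# matmul is the only operator in the module, its reduction block should
# itself be reported as the anchor.
def test_matmul_alone():
    A, B, C = matmul(128, 128, 128)
    mod = tvm.IRModule()
    mod["main"] = te.create_prim_func([A, B, C])
    block = find_anchor_block(mod)
    assert block is not None and block.name_hint == "C"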
if __name__ == "__main__":
pytest.main([__file__])
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
@pytest.mark.xfail
def test_loop_dependent_allocate():
N = te.size_var("N")
A = te.placeholder((2 * N,), "float32", "A")
C = te.compute((N,), lambda i: A[2 * i] + A[i + 1], name="C")
s = te.create_schedule(C.op)
AA = s.cache_read(A, "local", [C])
s[AA].compute_at(s[C], s[C].op.axis[0])
# this line should fail due to IRUseDefAnalysis sees an allocate statement
# referencing undefined variable
tvm.lower(s, [A, C])
if __name__ == "__main__":
test_loop_dependent_allocate()
"""Test gpu code verifier""" |
import tvm
from tvm |
import te
from tvm |
import topi |
import tvm.testing |
import tvm.topi.testing
def get_verify_pass(valid, **kwargs):
def _fverify(f, *_):
valid[0] = tvm.tir.analysis.verify_gpu_code(f, kwargs)
return f
return tvm.tir.transform.prim_func_pass(_fverify, opt_level=0)
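# Note on the helper above: a hook registered via tir.add_lower_pass must
# return the PrimFunc itself, so the boolean verdict of verify_gpu_code is
# carried out through the mutable one-element list `valid`, which each test
# inspects after building.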
@tvm.testing.requires_gpu
def test_shared_memory():
def check_shared_memory(storage_scope, dtype):
N = 1024
M = 128
tvm_type = tvm.runtime.DataType(dtype)
        type_size = tvm_type.bits // 8 * tvm_type.lanes  # element size in bytes, including vector lanes
A = te.placeholder((N,), name="A", dtype=dtype)
B = te.compute((N,), lambda i: A[i], name="B")
s = te.create_schedule([B.op])
AA = s.cache_read(A, storage_scope, [B])
o, i = s[B].split(s[B].op.axis[0], M)
s[AA].compute_at(s[B], o)
s[B].bind(o, te.thread_axis("blockIdx.x"))
s[B].bind(i, te.thread_axis("threadIdx.x"))
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid,
max_shared_memory_per_block=type_size * M - 1,
max_threads_per_block=M,
),
)
]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid,
max_shared_memory_per_block=type_size * M,
max_threads_per_block=M,
),
                    )
]
}
):
tvm.build(s, [A, B], target)
assert valid[0]
check_shared_memory("shared", "float32")
check_shared_memory("shared", "int8x4")
check_shared_memory("shared.dyn", "float32")
@tvm.testing.requires_gpu
def test_local_memory():
N = 1024
M = 128
A = te.placeholder((N,), name="A", dtype="float32")
B = te.compute((N,), lambda i: A[i], name="B")
s = te.create_schedule([B.op])
AA = s.cache_read(A, "local", [B])
o, i = s[B].split(s[B].op.axis[0], M)
s[AA].compute_at(s[B], o)
s[B].bind(o, te.thread_axis("blockIdx.x"))
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_local_memory_per_block=4 * M - 1, max_threads_per_block=1
),
)
]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_local_memory_per_block=4 * M, max_threads_per_block=1
),
)
]
}
):
tvm.build(s, [A, B], target)
assert valid[0]
@tvm.testing.requires_gpu
def test_num_thread():
N = 1024
M = 128
A = te.placeholder((N,), name="A", dtype="float32")
B = te.compute((N,), lambda i: A[i], name="B")
s = te.create_schedule([B.op])
o, i = s[B].split(s[B].op.axis[0], M)
s[B].bind(o, te.thread_axis("threadIdx.x"))
s[B].bind(i, te.thread_axis("threadIdx.y"))
    for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_shared_memory_per_block=0, max_threads_per_block=N - 1
),
)
]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_shared_memory_per_block=0, max_threads_per_block=N
),
)
]
}
):
tvm.build(s, [A, B], target)
assert valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid,
max_shared_memory_per_block=0,
max_threads_per_block=N,
max_thread_y=M - 1,
),
)
]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid,
max_shared_memory_per_block=0,
max_threads_per_block=N,
max_thread_y=M,
),
)
]
}
):
            tvm.build(s, [A, B], target)
assert valid[0]
@tvm.testing.requires_gpu
def test_multiple_kernels():
N = 1024
A = te.placeholder((N, N), name="A")
B = te.compute((N, N), lambda i, j: A[i, j])
C = te.compute((N, N), lambda i, j: B[i, j])
s = te.create_schedule([C.op])
s[C].bind(s[C].op.axis[1], te.thread_axis("threadIdx.x"))
s[B].bind(s[B].op.axis[1], te.thread_axis("threadIdx.x"))
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_shared_memory_per_block=0, max_threads_per_block=N - 1
),
)
]
}
):
tvm.build(s, [A, C], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_shared_memory_per_block=0, max_threads_per_block=N
),
)
]
}
):
tvm.build(s, [A, C], target)
assert valid[0]
@tvm.testing.requires_gpu
def test_wrong_bind():
N = 1024
A = te.placeholder((N, N - 1), name="A")
B = te.compute((N, N - 1), lambda i, j: A[i, j])
s = te.create_schedule([B.op])
s[B].bind(s[B].op.axis[0], te.thread_axis("threadIdx.x"))
s[B].bind(s[B].op.axis[1], te.thread_axis("threadIdx.x"))
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [(2, get_verify_pass(valid, max |
_threads_per_block=N * N))]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
@tvm.testing.requires_gpu
def test_vectorize():
N = 1024
A = te.placeholder((N, N), name="A")
B = te.compute((N, N), lambda i, j: A[i, j])
s = te.create_schedule([B.op])
i, j = s[B].op.axis
s[B].bind(i, te.thread_axis("blockIdx.x"))
jo, ji = s[B].split(j, factor=64)
s[B].bind(jo, te.thread_axis("threadIdx.x"))
s[B].vectorize(ji)
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_vector_bytes=16))]}
):
tvm.lower(s, [A, B])
assert not valid[0]
@tvm.testing.requires_gpu
def test_vectorize_half():
N = 1024
A = te.placeholder((N, N), name="A", dtype="float16")
B = te.compute((N, N), lambda i, j: A[i, j])
s = te.create_schedule([B.op])
i, j = s[B].op.axis
s[B].bind(i, te.thread_axis("blockIdx.x"))
jo, ji = s[B].split(j, factor=8)
s[B].bind(jo, te.thread_axis("threadIdx.x"))
s[B].vectorize(ji)
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_vector_bytes=16))]}
):
tvm.lower(s, [A, B])
assert valid[0]
@tvm.testing.requires_gpu
def test_vectorize_strided():
N = 1024
A = te.placeholder((N, N), name="A", dtype="float16")
B = te.compute((N, N), lambda i, j: A[j, i])
s = te.create_schedule([B.op])
i, j = s[B].op.axis
s[B].bind(i, te.thread_axis("blockIdx.x"))
jo, ji = s[B].split(j, factor=8)
s[B].vectorize(ji)
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
        valid = [None]
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_vector_bytes=16))]}
):
tvm.lower(s, [A, B])
assert not valid[0]
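# Taken together, the three vectorize tests probe max_vector_bytes: a 64-lane
# float32 vector (256 bytes) is rejected, 8 x float16 (exactly 16 bytes) is
# accepted, and the strided 8 x float16 case is rejected even though it fits,
# because the lanes are not contiguous in memory.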
@tvm.testing.requires_gpu
def test_vthread():
N = 1024
A = te.placeholder((N, 16), name="A")
B = te.compute((N, 16), lambda i, j: A[i, j])
s = te.create_schedule([B.op])
s[B].bind(s[B].op.axis[0], te.thread_axis("blockIdx.x"))
s[B].bind(s[B].op.axis[1], te.thread_axis("vthread"))
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
for phase in [1, 2]:
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(phase, get_verify_pass(valid, max_vthread=16))]}
):
tvm.build(s, [A, B], target)
assert valid[0]
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(phase, get_verify_pass(valid, max_vthread=15))]}
):
tvm.build(s, [A, B], target)
assert not valid[0]
@tvm.testing.requires_gpu
def test_redundant_kernels():
dtype = "float32"
A = te.placeholder(shape=(1,), name="A", dtype=dtype)
B = te.placeholder(shape=(1,), name="B", dtype=dtype)
C = te.placeholder(shape=(1,), name="C", dtype=dtype)
D = topi.less(A, C)
E = topi.less(B, C)
F = topi.logical_or(D, E)
G = topi.identity(F)
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
print("Running on target: %s" % target)
valid = [None]
with tvm.target.Target(target):
s = tvm.topi.testing.get_reduce_schedule(target)(G)
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_kernels=1))]}
):
tvm.build(s, [A, B, C, G], target)
assert valid[0]
if __name__ == "__main__":
    tvm.testing.main()
import tvm
import pytest
from tvm import te
import tvm.testing
gpu_devices = ["cuda", "opencl", "metal", "vulkan"]
other_devices = ["llvm", "ext_dev"]
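# Background note (hedged): VerifyMemory rejects functions whose device code
# would dereference host-visible argument buffers without the compute being
# bound to GPU thread axes; host-side targets such as "llvm" and "ext_dev"
# are exempt from the check, as the tests below exercise.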
@tvm.testing.uses_gpu
def test_verify_memory_all_bind():
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
s = te.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], factor=64)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
mod = tvm.lower(s, [A, B])
for dev_type in gpu_devices + other_devices:
if tvm.testing.device_enabled(dev_type):
binded_mod = tvm.tir.transform.Apply(
lambda f: f.with_attr("target", tvm.target.Target(dev_type))
)(mod)
tvm.tir.transform.VerifyMemory()(binded_mod)
@tvm.testing.uses_gpu
def test_verify_memory_not_bind():
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
s = te.create_schedule(B.op)
mod = tvm.lower(s, [A, B])
for dev_type in gpu_devices:
if tvm.testing.device_enabled(dev_type):
binded_mod = tvm.tir.transform.Apply(
lambda f: f.with_attr("target", tvm.target.Target(dev_type))
)(mod)
with pytest.raises(RuntimeError):
tvm.tir.transform.VerifyMemory()(binded_mod)
for dev_type in other_devices:
if tvm.testing.device_enabled(dev_type):
binded_mod = tvm.tir.transform.Apply(
lambda f: f.with_attr("target", tvm.target.Target(dev_type))
)(mod)
tvm.tir.transform.VerifyMemory()(binded_mod)
@tvm.testing.uses_gpu
def test_verify_memory_partially_bind():
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
C = te.compute(B.shape, lambda i: B[i] + 2.0, name="C")
D = te.compute(C.shape, lambda i: C[i] + 2.0, name="D")
    s = te.create_schedule([B.op, C.op, D.op])