import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass
import tvm.testing
import tvm.topi.testing
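# In each test below, expected() hand-builds the graph that FuseOps should
# produce: every fused group becomes a relay.Function tagged with the
# "Primitive" attribute and is invoked through relay.Call.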
def test_fuse_simple():
"""Simple testcase."""
def before():
x = relay.var("x", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.exp(y)
w = relay.squeeze(z)
return relay.Function([x], w)
def expected():
x = relay.var("p", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.exp(y)
w = relay.squeeze(z)
f1 = relay.Function([x], w)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=(10, 20))
y = relay.Call(f1, [x])
return relay.Function([x], y)
z = before()
zz = run_opt_pass(z, transform.FuseOps())
after = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
def test_conv2d_fuse():
"""Test fusion case of conv2d"""
def before(dshape):
x = relay.var("x", shape=dshape)
x = relay.add(x, relay.const(1, "float32"))
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=16)
y1 = relay.add(relay.const(1, "float32"), y)
y = relay.add(y, y1)
z2 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(1, 1), padding=(0, 0), channels=16)
z3 = relay.nn.conv2d(y, relay.var("w3"), kernel_size=(3, 3), padding=(1, 1), channels=16)
z = relay.add(z2, z3)
return relay.Function(relay.analysis.free_vars(z), z)
def expected(dshape):
x = relay.var("p0", shape=dshape)
y = relay.add(x, relay.const(1, "float32"))
f0 = relay.Function([x], y)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("p0", shape=dshape)
w = relay.var("p1")
y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=16)
y1 = relay.add(relay.const(1, "float32"), y)
y = relay.add(y, y1)
f1 = relay.Function([x, w], y)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("p0", shape=dshape)
w = relay.var("p1")
z2 = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=16)
f2 = relay.Function([x, w], z2)
f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("p0", shape=dshape)
w = relay.var("p1")
offset = relay.var("p2", shape=dshape)
z3 = relay.nn.conv2d(x, w, kernel_size=(1, 1), padding=(0, 0), channels=16)
z3 = relay.add(z3, offset)
f3 = relay.Function([x, w, offset], z3)
f3 = f3.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=dshape)
y = relay.Call(f0, [x])
y = relay.Call(f1, [y, relay.var("w1")])
z2 = relay.Call(f2, [y, relay.var("w3")])
z3 = relay.Call(f3, [y, relay.var("w2"), z2])
z = z3
return relay.Function(relay.analysis.free_vars(z), z)
dshape = (1, 16, 64, 64)
z = before(dshape)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
after = run_opt_pass(expected(dshape), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
def test_concatenate():
"""Test fusion case involving concat op and Tuple node"""
def before(dshape):
x = relay.var("x", shape=dshape)
pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
upsampled = relay.nn.upsampling(pooled, scale_h=2, scale_w=2, layout="NCHW")
concat = relay.concatenate((upsampled, x), axis=1)
out = relay.add(concat, relay.const(1, "float32"))
return relay.Function(relay.analysis.free_vars(out), out)
def expected(dshape):
x = relay.var("x", shape=dshape)
pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
f0 = relay.Function([x], pooled)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p0 = relay.var("p0", shape=(dshape[0], |
dshape[1], dshape[2]
p1 = relay.var("p1", shape=dshape)
upsampled = relay.nn.upsampling(p0, scale_h=2, scale_w=2, layout="NCHW")
concat = relay.concatenate((upsampled, p1), axis=1)
out = relay.add(concat, relay.const(1, "float32"))
f1 = relay.Function([p0, p1], out)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=dshape)
y = relay.Call(f0, [x])
z = relay.Call(f1, [y, x])
return relay.Function([x], z)
dshape = (1, 16, 64, 64)
z = before(dshape)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(dshape), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
def test_tuple_root():
"""Test fusion case where Tuple node is the root in its group"""
def before(dshape):
x = relay.var("x", shape=dshape)
pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
upsampled = relay.nn.upsampling(pooled, scale_h=2, scale_w=2, layout="NCHW")
out = relay.Tuple((upsampled, x))
return relay.Function(relay.analysis.free_vars(out), out)
def expected(dshape):
x = relay.var("x", shape=dshape)
pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
f0 = relay.Function([x], pooled)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p0 = relay.var("p0", shape=(dshape[0], dshape[1], dshape[2]
upsampled = relay.nn.upsampling(p0, scale_h=2, scale_w=2, layout="NCHW")
f1 = relay.Function([p0], upsampled)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=dshape)
y = relay.Call(f0, [x])
z = relay.Call(f1, [y])
tup = relay.Tuple((z, x))
        return relay.Function([x], tup)
dshape = (1, 16, 64, 64)
z = before(dshape)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(dshape), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
def test_stop_fusion():
def before(dshape):
x = relay.var("x", shape=dshape)
y = relay.add(x, relay.const(1, "float32"))
y = relay.annotation.stop_fusion(y)
z = relay.exp(y)
return relay.Function([x], z)
def expected(dshape):
x = relay.var("p0", shape=dshape)
y = relay.add(x, relay.const(1, "float32"))
f1 = relay.Function([x], y)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("p01", shape=dshape)
y = relay.exp(x)
f2 = relay.Function([x], y)
f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=dshape)
y = relay.Call(f1, [x])
z = relay.Call(f2, [y])
return relay.Function([x], z)
dshape = (10, 20)
z = before(dshape)
zz = run_opt_pass(z, transform.FuseOps())
after = run_opt_pass(expected(dshape), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
def test_fuse_myia_regression():
def before(dshape, dtype):
x = relay.var("x", shape=dshape, dtype=dtype)
y = relay.var("y", shape=dshape, dtype=dtype)
sb = relay.ScopeBuilder()
with sb.if_scope(relay.op.greater(x, y)):
sb.ret(relay.Function([], x))
with sb.else_scope():
sb.ret(relay.Function([], y))
return relay.Function([x, y], relay.Call(sb.get(), []))
def expected(dshape, dtype):
x = relay.var("x", shape=dshape, dtype=dtype)
y = relay.var("y", shape=dshape, dtype=dtype)
sb = relay.ScopeBuilder()
p1 = relay.var("p1", shape=dshape, dtype=dtype) |
p2 = relay.var("p2", shape=dshape, dtype=dtype)
fused_gt = relay.Function([p1, p2], relay.op.greater(p1, p2))
fused_gt = fused_gt.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
with sb.if_scope(fused_gt(x, y)):
sb.ret(relay.Function([], x))
with sb.else_scope():
sb.ret(relay.Function([], y))
return relay.Function([x, y], relay.Call(sb.get(), []))
dshape = ()
dtype = "int64"
f = before(dshape, dtype)
zz = run_opt_pass(f, transform.FuseOps())
after = run_opt_pass(expected(dshape, dtype), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
def test_fuse_tuple_get_elemwise():
def before(dim):
X = relay.var("X", shape=(1, dim))
W = relay.var("W", shape=(3 * dim, dim))
matmul = relay.nn.dense(X, W)
splitted = relay.split(matmul, indices_or_sections=3, axis=1)
out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
return relay.Function([X, W], out)
def expected(dim):
p0 = relay.var("p0", shape=(1, dim))
p1 = relay.var("p1", shape=(3 * dim, dim))
matmul = relay.nn.dense(p0, p1)
f0 = relay.Function([p0, p1], matmul)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p01 = relay.var("p01", shape=(1, 3 * dim))
splitted = relay.split(p01, indices_or_sections=3, axis=1)
out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
f1 = relay.Function([p01], out)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
X = relay.var("X", shape=(1, dim))
W = relay.var("W", shape=(3 * dim, dim))
y = relay.Call(f0, [X, W])
z = relay.Call(f1, [y])
return relay.Function([X, W], z)
dim = 10
z = before(dim)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
    assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(dim), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
def test_tuple_get_root():
def before(dim):
X = relay.var("X", shape=(1, 3 * dim))
W = relay.var("W", shape=(dim, dim))
splitted = relay.split(X, indices_or_sections=3, axis=1)
out = relay.nn.dense(splitted[0], W)
return relay.Function([X, W], out)
def expected(dim):
p0 = relay.var("p0", shape=(1, 3 * dim))
splitted = relay.split(p0, indices_or_sections=3, axis=1)
out = splitted[0]
f0 = relay.Function([p0], out)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p01 = relay.var("p01", shape=(1, dim))
p1 = relay.var("p1", shape=(dim, dim))
out = relay.nn.dense(p01, p1)
f1 = relay.Function([p01, p1], out)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
X = relay.var("X", shape=(1, 3 * dim))
W = relay.var("W", shape=(dim, dim))
y = relay.Call(f0, [X])
z = relay.Call(f1, [y, W])
return relay.Function([X, W], z)
dim = 10
z = before(dim)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(dim), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
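# Helpers: run type inference on a module and then FuseOps at fusion opt
# level 0 or 2.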
def fuse0(mod):
mod = relay.transform.InferType()(mod)
return relay.transform.FuseOps(fuse_opt_level=0)(mod)
def fuse2(mod):
mod = relay.transform.InferType()(mod)
return relay.transform.FuseOps(fuse_opt_level=2)(mod)
def test_tuple_intermediate():
def before(x):
inj = relay.squeeze(x)
y1 = relay.add(inj, relay.const(1, "float32"))
tmp = relay.squeeze(inj)
tmp = relay.add(tmp, relay.const(1, "float32"))
y2 = relay.add(tmp, relay.const(1, "float32"))
        y3 = relay.add(inj, relay.const(1, "float32"))
concat = relay.concatenate((y1, y2, y3), axis=1)
out_inj = relay.squeeze(concat)
out = relay.add(out_inj, relay.const(1, "float32"))
return relay.Function(relay.analysis.free_vars(out), out)
def expected(p0):
f0 = before(p0)
f1 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=dshape)
y = relay.Call(f1, [x])
return relay.Function([x], y)
dshape = (1, 16, 64, 64)
x = relay.var("x", shape=dshape)
orig = before(x)
fuse0(tvm.IRModule.from_expr(orig))
m = fuse2(tvm.IRModule.from_expr(orig))
relay.build(m, "llvm")
after = run_opt_pass(expected(x), transform.InferType())
assert tvm.ir.structural_equal(m["main"], after)
def test_tuple_consecutive():
def gen_intermediate_tuple(x):
y1 = relay.add(x, relay.const(1, "float32"))
y2 = relay.add(x, relay.const(1, "float32"))
y3 = relay.add(x, relay.const(1, "float32"))
concat = relay.concatenate((y1, y2, y3), axis=1)
out = relay.add(concat, relay.const(1, "float32"))
return out
def gen_consecutive_tuple(x):
y1 = gen_intermediate_tuple(x)
y2 = gen_intermediate_tuple(x)
y3 = gen_intermediate_tuple(x)
concat = relay.concatenate((y1, y2, y3), axis=1)
return concat
def before(x):
concat = gen_consecutive_tuple(x)
pooled = relay.nn.max_pool2d(concat, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
out = relay.add(pooled, relay.const(1, "float32"))
out2 = relay.add(out, relay.const(1, "float32"))
out_tup = relay.Tuple((out, out2))
return relay.Function(relay.analysis.free_vars(out_tup), out_tup)
def expected(dshape):
p0 = relay.var("p0", shape=dshape)
concat = gen_consecutive_tuple(p0)
f0 = relay.Function([p0], concat)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p01 = relay.var("p01", shape=(1, dsh |
ape[1] * 9, dshape[2], dshape[3]))
pooled = relay.nn.max_pool2d(p01, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
out = relay.add(pooled, relay.const(1, "float32"))
f1 = relay.Function([p01], out)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p02 = relay.var("p02", shape=(1, dshape[1] * 9, dshape[2]
out = relay.add(p02, relay.const(1, "float32"))
f2 = relay.Function([p02], out)
f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=dshape)
y = relay.Call(f0, [x])
z = relay.Call(f1, [y])
z2 = relay.Call(f2, [z])
return relay.Function([x], relay.Tuple((z, z2)))
dshape = (1, 16, 64, 64)
x = relay.var("x", shape=dshape)
orig = before(x)
fuse0(tvm.IRModule.from_expr(orig))
m = fuse2(tvm.IRModule.from_expr(orig))
relay.build(m, "llvm")
after = run_opt_pass(expected(dshape), transform.InferType())
assert tvm.ir.structural_equal(m["main"], after)
def test_inception_like():
def conv(data):
y = relay.nn.conv2d(data, relay.var("w"), kernel_size=(3, 3), padding=(1, 1), channels=16)
return relay.nn.relu(data=y)
def inception_like(data):
c0 = conv(data)
c1 = conv(data)
return relay.concatenate((c0, c1), axis=1)
def before(dshape):
x = relay.var("x", shape=dshape)
in1 = inception_like(x)
in2 = inception_like(in1)
return relay.Function(relay.analysis.free_vars(in2), in2)
def expected(dshape):
p0 = relay.var("p0", shape=dshape)
c = conv(p0)
f0 = relay.Function(relay.analysis.free_vars(c), c)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p01 = relay.var("p01", shape=dshape)
c = conv(p01)
f1 = relay.Function(relay.analysis.free_vars(c), c)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p02 = relay.var("p02", shape=dshape)
p12 = relay.var("p12", shape= |
dshape)
concat1 = relay.concatenate((p02, p12), axis=1)
f_concat1 = relay.Function([p02, p12], concat1)
f_concat1 = f_concat1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
dshape2 = (dshape[0], dshape[1] * 2, dshape[2], dshape[3])
p03 = relay.var("p03", shape=dshape2)
c = conv(p03)
f2 = relay.Function(relay.analysis.free_vars(c), c)
f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p04 = relay.var("p04", shape=dshape2)
c = conv(p04)
f3 = relay.Function(relay.analysis.free_vars(c), c)
f3 = f3.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
p05 = relay.var("p05", shape=dshape)
p15 = relay.var("p15", shape=dshape)
concat2 = relay.concatenate((p05, p15), axis=1)
f_concat2 = relay.Function([p05, p15], concat2)
f_concat2 = f_concat2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=dshape)
c1 = relay.Call(f0, [x, relay.var("w1")])
c2 = relay.Call(f1, [x, relay.var("w2")])
concat = relay.Call(f_concat1, [c1, c2])
c3 = relay.Call(f2, [concat, relay.var("w3")])
c4 = relay.Call(f3, [concat, relay.var("w4")])
out = relay.Call(f_concat2, [c3, c4])
return relay.Function(relay.analysis.free_vars(out), out)
dshape = (1, 16, 64, 64)
orig = before(dshape)
fuse0(tvm.IRModule.from_expr(orig))
m = fuse2(tvm.IRModule.from_expr(orig))
relay.build(m, "llvm")
after = run_opt_pass(expected(dshape), transform.InferType())
assert tvm.ir.structural_equal(m["main"], after)
def test_fuse_parallel_injective():
"""Test fusing parallel injective ops to an elemwise op."""
def before():
x = relay.var("x", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.squeeze(y)
u = relay.transpose(y, axes=[0, 1])
w = relay.left_shift(z, u)
return relay.Function([x], w)
def expected():
        x = relay.var("p", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.squeeze(y)
u = relay.transpose(y, axes=[0, 1])
w = relay.left_shift(z, u)
f1 = relay.Function([x], w)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=(10, 20))
y = relay.Call(f1, [x])
return relay.Function([x], y)
z = before()
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
def test_immutable():
"""Verify the fusion pass won't change original module."""
def before():
x = relay.var("x", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.exp(y)
w = relay.squeeze(z)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], w)
return mod
def expected():
x = relay.var("p", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.exp(y)
w = relay.squeeze(z)
f1 = relay.Function([x], w)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=(10, 20))
y = relay.Call(f1, [x])
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y)
return mod
mod = transform.InferType()(before())
new_mod = transform.FuseOps(fuse_opt_level=2)(mod)
assert tvm.ir.structural_equal(mod, transform.InferType()(before()))
assert tvm.ir.structural_equal(new_mod, transform.InferType()(expected()))
def test_split():
"""Test that the result is well formed."""
x = relay.var("x", shape=(6, 9))
y = relay.split(x, 3).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
c = relay.TupleGetItem(y, 2)
mod = tvm.IRModule()
mod["m |
ain"] = relay.Function([x], a + relay.RefRead(relay.RefCreate(b)) + c)
mod = transform.InferType()(mod)
mod = transform.FuseOps()(mod)
def test_fuse_max():
"""Test the constraint of number of nodes in op fusion."""
def before(n):
x = relay.var("x", shape=(10, 20))
y = x
for i in range(n):
y = relay.exp(y)
return relay.Function([x], y)
def expected(n, max_fused_ops):
x = relay.var("p", shape=(10, 20))
y = x
for i in range(max_fused_ops):
y = relay.exp(y)
f1 = relay.Function([x], y)
f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=(10, 20))
z = relay.Call(f1, [x])
xx = relay.var("pp", shape=(10, 20))
yy = xx
for i in range(n - max_fused_ops):
yy = relay.exp(yy)
f2 = relay.Function([xx], yy)
f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
zz = relay.Call(f2, [z])
return relay.Function([x], zz)
max_fused_ops = 256
n = 300
z = before(n)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
zz = run_opt_pass(z, transform.FuseOps())
after = run_opt_pass(expected(n, max_fused_ops), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
max_fused_ops = 10
n = 20
z = before(n)
after = run_opt_pass(expected(n, max_fused_ops), transform.InferType())
with tvm.transform.PassContext(config={"relay.FuseOps.max_depth": max_fused_ops}):
zz = run_opt_pass(z, transform.FuseOps())
assert tvm.ir.structural_equal(zz, after)
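# Parameterize the take/gather_nd tests below over relay.FuseOps.link_params:
# with link_params=True the constant indices stay inside the fused function,
# otherwise they are passed in as an extra parameter.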
link_params = tvm.testing.parameter(False, True)
def test_fuse_take(link_params):
"""Test fusion case involving concat and take"""
def before():
shape = (tvm.tir.const(10, "int64"), tvm.tir.const(1, "int64"))
x = relay.var("x", shape=shape)
concat = relay.concatenate([x, x], axis=-1)
        out = relay.op.take(concat, indices=relay.const([0], dtype="int64"))
return relay.Function(relay.analysis.free_vars(out), out)
def expected(link_params):
shape1 = (tvm.tir.const(10, "int64"), tvm.tir.const(1, "int64"))
shape2 = (tvm.tir.const(1, "int64"),)
x = relay.var("x", shape=shape1)
p0 = relay.var("p0", shape=shape1)
p1 = relay.var("p1", shape=shape2, dtype="int64")
c = relay.const([0], dtype="int64")
concat = relay.concatenate([p0, p0], axis=-1)
out = relay.op.take(concat, indices=c if link_params else p1)
f0 = relay.Function([p0] if link_params else [p0, p1], out)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
y = relay.Call(f0, [x] if link_params else [x, c])
return relay.Function([x], y)
after = run_opt_pass(expected(link_params), transform.InferType())
with tvm.transform.PassContext(opt_level=2, config={"relay.FuseOps.link_params": link_params}):
m = run_opt_pass(before(), transform.InferType())
m = run_opt_pass(m, transform.FuseOps())
assert tvm.ir.structural_equal(m, after)
relay.build(m, "llvm")
def test_fuse_gather_nd(link_params):
"""Test fusion case involving concat and gather_nd"""
def before():
shape = (tvm.tir.const(10, "int64"), tvm.tir.const(1, "int64"))
x = relay.var("x", shape=shape)
concat = relay.concatenate([x, x], axis=-1)
out = relay.gather_nd(concat, indices=relay.expr.const([[0, 1], [1, 0]], dtype="int64"))
return relay.Function(relay.analysis.free_vars(out), out)
def expected(link_params):
shape1 = (tvm.tir.const(10, "int64"), tvm.tir.const(1, "int64"))
shape2 = (tvm.tir.const(2, "int64"), tvm.tir.const(2, "int64"))
x = relay.var("x", shape=shape1)
p0 = relay.var("p0", shape=shape1)
p1 = relay.var("p1", shape=shape2, dtype="int64")
c = relay.const([[0, 1], [1, 0]], dtype="int64")
concat = relay.concatenate([p0, p0], axis=-1)
        out = relay.gather_nd(concat, indices=c if link_params else p1)
f0 = relay.Function([p0] if link_params else [p0, p1], out)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
y = relay.Call(f0, [x] if link_params else [x, c])
return relay.Function([x], y)
after = run_opt_pass(expected(link_params), transform.InferType())
with tvm.transform.PassContext(opt_level=2, config={"relay.FuseOps.link_params": link_params}):
m = run_opt_pass(before(), transform.InferType())
m = run_opt_pass(m, transform.FuseOps())
assert tvm.ir.structural_equal(m, after)
relay.build(m, "llvm")
@tvm.testing.uses_gpu
def test_fuse_bcast_reduce_scalar():
"""Test fusion case with broadcast and reduction involving scalar"""
def before():
x = relay.var("x", shape=(), dtype="int32")
less = relay.less(x, relay.const(10, dtype="int32"))
z = relay.min(less)
return relay.Function([x], z)
def expected():
p0 = relay.var("p0", shape=(), dtype="int32")
less = relay.less(p0, relay.const(10, dtype="int32"))
z0 = relay.min(less)
f0 = relay.Function([p0], z0)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
x = relay.var("x", shape=(), dtype="int32")
f = relay.Call(f0, [x])
return relay.Function([x], f)
orig = before()
m = fuse2(tvm.IRModule.from_expr(orig))
for tgt, dev in tvm.testing.enabled_targets():
relay.build(m, tgt)
after = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(m["main"], after)
def test_fuse_max_diamond():
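    # Each "diamond" is two exp chains of length branch_len joined by an add
    # (2 * branch_len + 1 ops), so relay.FuseOps.max_depth below is chosen to
    # fit exactly one diamond per fused group.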
def create_diamond(x, branch_len):
x1 = x
x2 = x
for _ in range(branch_len):
x1 = relay.exp(x1)
x2 = relay.exp(x2)
return relay.add(x1, x2)
def before(branch_len, num_diamond):
x = relay.var("x", shape=(10, 20))
out = x
for _ in range(num_diamond):
out = create_diamond(out, branch_len)
        return relay.Function([x], out)
def after(branch_len, num_diamond):
def create_diamond_func(inp):
inp_var = relay.var("p", shape=(10, 20))
d = create_diamond(inp_var, branch_len)
f = relay.Function([inp_var], d)
f = f.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
return relay.Call(f, [inp])
inp = relay.var("x", shape=(10, 20))
out = inp
for _ in range(num_diamond):
out = create_diamond_func(out)
return relay.Function([inp], out)
branch_len = 5
max_fused_ops = branch_len * 2 + 1
num_diamond = 3
with tvm.transform.PassContext(config={"relay.FuseOps.max_depth": max_fused_ops}):
fused = run_opt_pass(before(branch_len, num_diamond), transform.FuseOps())
expected = run_opt_pass(after(branch_len, num_diamond), transform.InferType())
assert tvm.ir.structural_equal(fused, expected)
def test_fuse_dynamic_squeeze_slice_take():
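    # Dynamic-shape case: squeeze -> strided_slice -> take with relay.Any()
    # dims, evaluated on the VM and checked against numpy.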
input_data = [
np.random.random([1, 2, 4]).astype("float32"),
np.array([0]).astype("int64"),
]
x = relay.var("p0107", shape=(relay.Any(), relay.Any(), 4), dtype="float32")
take_val = relay.var("p166", shape=(relay.Any(),), dtype="int64")
squeeze = relay.op.squeeze(x, axis=[0])
strided_slice = relay.op.strided_slice(
squeeze, begin=[0, 0], end=[15130, 2147483647], strides=[1, 1]
)
take = relay.op.take(strided_slice, take_val, axis=0)
mod = tvm.IRModule.from_expr(take)
result = relay.create_executor("vm", mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
*input_data
)
np_result = np.squeeze(input_data[0][:, input_data[1][0], :], axis=0)
assert np.allclose(result.numpy(), np_result)
@tvm.testing.uses_gpu
def test_fuse_softmax():
"""Test if softmax can be fused with following ops."""
channel_size = 16
def before():
x = relay.var("x", shape=(16, channel_size))
softmax = relay.nn.softmax(x)
out = relay.cast(softmax, "float16")
        return relay.Function([x], out)
def expected():
p0 = relay.var("p0", shape=(16, channel_size))
softmax = relay.nn.softmax(p0)
out = relay.cast(softmax, "float16")
x = relay.var("x", shape=(16, channel_size))
f0 = relay.Function([p0], out)
f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
y = relay.Call(f0, [x])
return relay.Function([x], y)
orig = before()
m = fuse2(tvm.IRModule.from_expr(orig))
after = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(m["main"], after)
inp = np.random.randn(16, channel_size).astype("float32")
ref = tvm.topi.testing.softmax_python(inp).astype("float16")
for tgt, dev in tvm.testing.enabled_targets():
ex = relay.create_executor("graph", mod=m, device=dev, target=tgt)
result = ex.evaluate()(inp).numpy()
tvm.testing.assert_allclose(result, ref, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
    pytest.main([__file__])
import collections
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay import GlobalVar
from tvm.relay.analysis import free_vars, free_type_vars
from tvm.relay import create_executor, transform
from tvm.relay.transform import gradient
from tvm.relay.prelude import Prelude
from tvm.relay.testing import (
    make_nat_expr,
    run_infer_type,
    check_grad,
    rand,
    count_ops,
)
import tvm.relay.op as op
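# gradient() returns a function whose result is a tuple of
# (forward value, tuple of gradients w.r.t. each input); the checked_type
# assertions below verify exactly that signature.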
def test_fo_id():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func, mode="first_order"))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x = rand(dtype, *shape)
forward, (grad,) = create_executor().evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.numpy(), x.numpy())
tvm.testing.assert_allclose(grad.numpy(), np.ones_like(x.numpy()))
def test_id():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x = rand(dtype, *shape)
forward, (grad,) = create_executor().evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.numpy(), x.numpy())
tvm.testing.assert_allclose(grad.numpy(), np.ones_like(x.numpy()))
def test_relu():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], op.nn.relu(x))
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
def test_add():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x + x)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x = rand(dtype, *shape)
forward, (grad,) = create_executor().evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.numpy(), 2 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 2 * np.ones_like(x.numpy()))
def test_check_grad():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.var("y", t)
func = relay.Function([x, y], x + y)
check_grad(func)
def test_temp_add():
scope = relay.ScopeBuilder()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = scope.let("y", x + x)
scope.ret(y + y)
func = relay.Function([x], scope.get())
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x = rand(dtype, *shape)
forward, (grad,) = create_executor().evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.numpy(), 4 * x.numpy())
tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
def test_sub():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x - x)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x = rand(dtype, *shape)
forward, (grad,) = create_executor().evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.numpy(), np.zeros_like(x.numpy()))
tvm.testing.assert_allclose(grad.numpy(), np.zeros_like(x.numpy()))
def test_broadcast_add():
shape1 = (3, 4, 1)
shape2 = (1, 5)
dtype = "float32"
x_nd = rand(dtype, *shape1)
y_nd = rand(dtype, *shape2)
x_np = x_nd.numpy()
y_np = y_nd.numpy()
expected_forward = x_np + y_np
t1 = relay.TensorType(shape1, dtype)
t2 = relay.TensorType(shape2, dtype)
x = relay.var("x", t1)
y = relay.var("y", t2)
func = relay.Function([x, y], x + y)
func = run_infer_type(func)
full_func = run_infer_type(gradient(func))
    assert full_func.checked_type == relay.FuncType(
[t1, t2],
relay.TupleType(
[relay.TensorType(expected_forward.shape, dtype), relay.TupleType([t1, t2])]
),
)
forward, (grad_x, grad_y) = create_executor().evaluate(full_func)(x_nd, y_nd)
tvm.testing.assert_allclose(forward.numpy(), expected_forward)
tvm.testing.assert_allclose(
grad_x.numpy(), np.ones_like(expected_forward).sum(axis=2, keepdims=True)
)
tvm.testing.assert_allclose(
grad_y.numpy(),
np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0),
)
def test_broadcast_subtract():
shape1 = (3, 4, 1)
shape2 = (1, 5)
dtype = "float32"
x_nd = rand(dtype, *shape1)
y_nd = rand(dtype, *shape2)
x_np = x_nd.numpy()
y_np = y_nd.numpy()
expected_forward = x_np - y_np
t1 = relay.TensorType(shape1, dtype)
t2 = relay.TensorType(shape2, dtype)
x = relay.var("x", t1)
y = relay.var("y", t2)
func = relay.Function([x, y], x - y)
func = run_infer_type(func)
full_func = run_infer_type(gradient(func))
assert full_func.checked_type == relay.FuncType(
[t1, t2],
relay.TupleType(
[relay.TensorType(expected_forward.shape, dtype), relay.TupleType([t1, t2])]
),
)
forward, (grad_x, grad_y) = create_executor().evaluate(full_func)(x_nd, y_nd)
tvm.testing.assert_allclose(forward.numpy(), expected_forward)
tvm.testing.assert_allclose(
grad_x.numpy(), np.ones_like(expected_forward).sum(axis=2, keepdims=True)
)
tvm.testing.assert_allclose(
grad_y.numpy(),
-np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0),
)
def _test_tuple(mode):
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.var("y", t)
z = relay.var("z", t)
if mode == "higher_order":
tup = relay.Var("tup")
func = relay.Function(
[x, y, z],
            relay.Let(
tup,
relay.Tuple([x, y, z]),
relay.TupleGetItem(tup, 0)
+ relay.TupleGetItem(tup, 1)
- relay.TupleGetItem(tup, 2),
),
)
else:
tup = relay.Tuple([x, y, z])
func = relay.Function(
[x, y, z],
relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1) - relay.TupleGetItem(tup, 2),
)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func, mode=mode))
assert back_func.checked_type == relay.FuncType(
[t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])])
)
x_nd = rand(dtype, *shape)
y_nd = rand(dtype, *shape)
z_nd = rand(dtype, *shape)
x_np = x_nd.numpy()
y_np = y_nd.numpy()
z_np = z_nd.numpy()
expected_forward = x_np + y_np - z_np
forward, (grad_x, grad_y, grad_z) = create_executor().evaluate(back_func)(x_nd, y_nd, z_nd)
tvm.testing.assert_allclose(forward.numpy(), expected_forward)
tvm.testing.assert_allclose(grad_x.numpy(), np.ones_like(grad_x.numpy()))
tvm.testing.assert_allclose(grad_y.numpy(), np.ones_like(grad_y.numpy()))
tvm.testing.assert_allclose(grad_z.numpy(), -1 * np.ones_like(grad_z.numpy()))
def _test_tuple_argument(mode):
shape = (2, 3)
dtype = "float32"
tensor_type = relay.TensorType(shape, dtype)
fields = 3
tuple_type = relay.TupleType([tensor_type] * fields)
tup = relay.var("tup", type_annotation=tuple_type)
body = relay.TupleGetItem(tup, 0)
for i in range(1, fields):
body = relay.add(body, relay.TupleGetItem(tup, i))
func = relay.Function([tup], body)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func, mode=mode))
xs = [rand(dtype, *shape) for _ in range(fields)]
xs_np = np.array([x.numpy() for x in xs])
expected_forward = np.sum(xs_np, axis=0)
forward, grad = create_executor().evaluate(back_func)(tuple(xs))
    tvm.testing.assert_allclose(forward.numpy(), expected_forward)
for field in grad[0]:
tvm.testing.assert_allclose(field.numpy(), np.ones_like(field.numpy()))
def test_tuple():
_test_tuple("higher_order")
def test_tuple_first_order():
_test_tuple("first_order")
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_tuple_argument():
_test_tuple_argument("higher_order")
def test_tuple_argument_first_order():
_test_tuple_argument("first_order")
def test_pow():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat_iterate = mod.get_global_var("nat_iterate")
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
double = relay.Function([x], x + x)
i = relay.var("i", t)
func = relay.Function([i], nat_iterate(double, make_nat_expr(p, 3))(i))
mod["main"] = func
mod = transform.InferType()(mod)
mod["main"] = gradient(mod["main"], mod=mod)
m = transform.InferType()(mod)
back_func = m["main"]
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
i_nd = rand(dtype, *shape)
forward, (grad_i,) = create_executor(mod=mod).evaluate(back_func)(i_nd)
tvm.testing.assert_allclose(forward.numpy(), 8 * i_nd.numpy())
tvm.testing.assert_allclose(grad_i.numpy(), 8 * np.ones_like(grad_i.numpy()))
def test_ref():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
r = relay.Var("r")
u = relay.Var("u")
body = relay.RefRead(r)
body = relay.Let(u, relay.RefWrite(r, relay.RefRead(r) + relay.RefRead(r)), body)
body = relay.Let(r, relay.RefCreate(x), body)
func = relay.Function([x], body)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x_nd = rand(dtype, *shape)
forward, (grad_x,) = create_executor().evaluate(back_func)(x_nd)
    tvm.testing.assert_allclose(forward.numpy(), 2 * x_nd.numpy())
tvm.testing.assert_allclose(grad_x.numpy(), 2 * np.ones_like(grad_x.numpy()))
def test_square_second_order():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x * x)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
y = relay.var("y", t)
back_func_adjusted = relay.Function(
[y], relay.TupleGetItem(relay.TupleGetItem(back_func(y), 1), 0)
)
back_func_adjusted = run_infer_type(back_func_adjusted)
back_back_func = run_infer_type(gradient(back_func_adjusted))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x_nd = rand(dtype, *shape)
forward, (grad_x,) = create_executor().evaluate(back_back_func)(x_nd)
tvm.testing.assert_allclose(forward.numpy(), 2 * x_nd.numpy())
tvm.testing.assert_allclose(grad_x.numpy(), 2 * np.ones_like(grad_x.numpy()))
def test_if():
x = relay.var("x", shape=(1, 16, 64, 64))
y = relay.var("y", shape=(1, 16, 64, 64))
cond = relay.var("cond", shape=(), dtype="uint1")
net = relay.If(cond, x, y)
net = relay.log(net)
func = relay.Function(free_vars(net), net)
func = run_infer_type(func)
net = gradient(func, mode="higher_order")
net = run_infer_type(net)
def test_grad_tuple():
scope = relay.ScopeBuilder()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = scope.let("y", x + x)
scope.ret(relay.Tuple([y + y, y]))
func = relay.Function([x], scope.get())
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType(
[t], relay.TupleType([relay.TupleType([t, t]), relay.TupleType([t])])
)
x = rand(dtype, *shape)
(forward_four, forward_two), (grad,) = create_executor().evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward_four.numpy(), 4 * x.numpy())
tvm.testing.assert_allclose(forward_two.numpy(), 2 * x.numpy())
tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
def test_concat():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
rt = relay.TensorType((10, 20), dtype)
x = relay.var("x", t)
y = op.concatenate([x, x], axis=1)
func = relay.Function([x], y)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
tvm.ir.assert_structural_equal(
back_func.checked_type, relay.FuncType([t], relay.TupleType([rt, relay.TupleType([t])]))
)
def test_no_duplication():
x = tvm.relay.Var("x", type_annotation=tvm.relay.TensorType([12, 12]))
y = tvm.relay.Var("y", type_annotation=tvm.relay.TensorType([12, 12]))
xy = tvm.relay.nn.dense(x, y)
m = tvm.relay.sum(xy, keepdims=True)
s = tvm.relay.sum(xy - m)
fn = tvm.relay.Function([x, y], s)
fn = run_infer_type(fn)
gr = tvm.relay.transform.gradient(fn, mode="first_order")
counts = count_ops(gr)
assert counts["nn.dense"] == 3, "We expect 3 dense (1 forward, two backward)"
def test_no_duplication_tuples():
x = tvm.relay.Var("x", type_annotation=tvm.relay.TensorType([12, 12]))
y = tvm.relay.Var("y", type_annotation=tvm.relay.TensorType([12, 12]))
xy = tvm.relay.nn.dense(x, y)
t = relay.Tuple([xy, xy])
m = tvm.relay.sum(xy, keepdims=True)
s = tvm.relay.sum(relay.TupleGetItem(t, 0) - m)
fn = tvm.relay.Function([x, y], s)
fn = run_infer_type(fn)
gr = tvm.relay.transform.gradient(fn, mode="first_order")
counts = count_ops(gr)
assert counts["nn.dense"] == 3, "We expect 3 dense (1 forward, two backward)"
def test_global_function():
m = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.Var("x", t)
d = GlobalVar("double")
m[d] = relay.Function([x], x + x)
y = relay.Var("y", t)
q = GlobalVar("q")
m[q] = relay.Function([y], d(d(y)))
    g = GlobalVar("grad")
m = tvm.relay.transform.InferType()(m)
m[g] = tvm.relay.transform.gradient(q, m)
m = tvm.relay.transform.InferType()(m)
back_func = m[g]
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x = rand(dtype, *shape)
forward, (grad,) = create_executor(mod=m).evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.numpy(), 4 * x.numpy())
tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
if __name__ == "__main__":
    pytest.main([__file__])
import pytest
import tvm
from tvm import relay
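# Shared helper: build a module whose "sum_up" global function recursively
# sums the integers 0..i and is marked with the "Inline" attribute.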
def get_recursive_count_loop():
mod = tvm.IRModule({})
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
sb = relay.ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype="int32"))
rec_call = relay.Call(sum_up, [one_less])
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
mod[sum_up] = func
iarg = relay.var("i", shape=[], dtype="int32")
mod["main"] = relay.Function([iarg], sum_up(iarg))
return mod, sum_up
def test_call_chain_inline_leaf():
"""Test when only leaf call is inlined.
The call graph is like the following:
main
/ \
g1 g2
/
g11(inline)
"""
def get_mod():
mod = tvm.IRModule({})
x11 = relay.var("x11", shape=(3, 5))
g11 = relay.GlobalVar("g11")
fn11 = relay.Function([x11], x11)
fn11 = fn11.with_attr("Inline", tvm.tir.IntImm("int32", 1))
mod[g11] = fn11
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
sb = relay.ScopeBuilder()
sb.ret(x1 + y1 + g11(x1))
fn1 = relay.Function([x1, y1], sb.get())
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = g1(p0, p1)
call_fn2 = g2(p2, p3)
mod["main"] = relay.Function([p0, p1 |
, p2, p3], call_fn1 * call_fn2)
return mod
def expected():
mod = tvm.IRModule({})
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
sb = relay.ScopeBuilder()
sb.ret(x1 + y1 + x1)
fn1 = relay.Function([x1, y1], sb.get())
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = g1(p0, p1)
call_fn2 = g2(p2, p3)
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_call_chain_inline_multiple_levels():
"""Test when only leaf call is inlined.
The call graph is like the following:
main
/ \
g1(inline) g2
/
g11(inline)
"""
def get_mod():
mod = tvm.IRModule({})
x11 = relay.var("x11", shape=(3, 5))
g11 = relay.GlobalVar("g11")
fn11 = relay.Function([x11], x11)
fn11 = fn11.with_attr("Inline", tvm.tir.IntImm("int32", 1))
mod[g11] = fn11
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
sb = relay.ScopeBuilder()
sb.ret(x1 + y1 + g11(x1))
fn1 = relay.Function([x1, y1], sb.get())
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = g1(p0, p1)
call_fn2 = g2(p2, p3)
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
def expected():
mod = tvm.IRModule({})
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = p0 + p1 + p0
call_fn2 = g2(p2, p3)
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_call_chain_inline_multiple_levels_extern_compiler():
"""Test when only leaf call is inlined.
The call graph is like the following:
main
/ \
g1(inline) g2
/
g11(inline, external compiler)
"""
def get_mod():
mod = tvm.IRModule({})
x11 = relay.var("x11", shape=(3, 5))
g11 = relay.GlobalVar("g11")
fn11 = relay.Function([x11], x11)
fn11 = fn11.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn11 = fn11.with_attr("Compiler", "a")
mod[g11] = fn11
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
sb = relay.ScopeBuilder()
sb.ret(x1 + y1 + g11(x1))
fn1 = relay.Function([x1, y1], sb.get())
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = g1(p0, p1)
call_fn2 = g2(p2, p3)
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
def expected():
mod = tvm.IRModule({})
x11 = relay.var("x11", shape=(3, 5))
fn11 = relay.Function([x11], x11)
fn11 = fn11.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn11 = fn11.with_attr("Compiler", "a")
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = p0 + p1 + fn11(p0)
call_fn2 = g2(p2, p3)
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_recursive_call_with_global():
def get_mod():
mod = tvm.IRModule({})
x = relay.var("x", shape=[], dtype="int32")
fn0 = relay.Function([x], x)
fn0 = fn0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
gx = relay.GlobalVar("gx")
mod[gx] = fn0
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", sha |
pe=[], dtype="int32")
sb = relay.ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype="int32"))
global_call = gx(i)
rec_call = relay.Call(sum_up, [one_less]) + global_call
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
mod[sum_up] = func
iarg = relay.var("i", shape=[], dtype="int32")
mod["main"] = relay.Function([iarg], sum_up(iarg))
return mod
def expected():
mod = tvm.IRModule({})
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
sb = relay.ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype="int32"))
rec_call = relay.Call(sum_up, [one_less]) + i
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
mod[sum_up] = func
iarg = relay.var("i", shape=[], dtype="int32")
mod["main"] = relay.Function([iarg], sum_up(iarg))
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_recursive_called():
mod, sum_up = get_recursive_count_loop()
iarg = relay.var("i", shape=[], dtype="int32")
mod["main"] = relay.Function([iarg], sum_up(iarg))
ref_mod = mod
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, ref_mod, map_free_vars=True)
def test_recursive_not_called():
def get_mod():
        mod, sum_up = get_recursive_count_loop()
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(2, 2))
x1 = relay.var("x1", shape=(2, 2))
fn1 = relay.Function([x1], x1)
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
mod["main"] = relay.Function([x, y], x + y + g1(x))
return mod
def expected():
mod, sum_up = get_recursive_count_loop()
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(2, 2))
mod["main"] = relay.Function([x, y], x + y + x)
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
ref_mod = expected()
assert tvm.ir.structural_equal(mod, ref_mod, map_free_vars=True)
def test_recursive_not_called_extern_compiler():
def get_mod():
mod, sum_up = get_recursive_count_loop()
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(2, 2))
x1 = relay.var("x1", shape=(2, 2))
fn1 = relay.Function([x1], x1)
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn1 = fn1.with_attr("Compiler", "a")
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
mod["main"] = relay.Function([x, y], x + y + g1(x))
return mod
def expected():
mod, sum_up = get_recursive_count_loop()
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(2, 2))
x1 = relay.var("x1", shape=(2, 2))
fn1 = relay.Function([x1], x1)
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn1 = fn1.with_attr("Compiler", "a")
mod["main"] = relay.Function([x, y], x + y + fn1(x))
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
ref_mod = expected()
assert tvm.ir.structural_equal(mod, ref_mod, map_free_vars=True)
def test_globalvar_as_call_arg():
def get_mod():
mod = tvm.IRModule({})
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
sb.ret(x1 + y1)
fn1 = relay.Function([x1, y1], sb.get())
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = g1(p0, p1)
call_fn2 = g2(p2, p3)
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
    def expected():
        mod = tvm.IRModule({})
        p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = p0 + p1
call_fn2 = p2 - p3
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_globalvar_as_call_arg_extern_compiler():
def get_mod():
mod = tvm.IRModule({})
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
sb = relay.ScopeBuilder()
sb.ret(x1 + y1)
fn1 = relay.Function([x1, y1], sb.get())
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn1 = fn1.with_attr("Compiler", "a")
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm |
("int32", 1))
fn2 = fn2.with_attr("Compiler", "b")
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = g1(p0, p1)
call_fn2 = g2(p2, p3)
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
def expected():
mod = tvm.IRModule({})
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
sb = relay.ScopeBuilder()
sb.ret(x1 + y1)
fn1 = relay.Function([x1, y1], sb.get())
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn1 = fn1.with_attr("Compiler", "a")
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn2 = fn2.with_attr("Compiler", "b")
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = relay.Call(fn1, [p0, p1])
call_fn2 = relay.Call(fn2, [p2, p3])
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_inline_globalvar_without_args():
def get_mod():
mod = tvm.IRModule({})
fn1 = relay.Function([], relay.const(1))
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn2 = relay.Function([], relay.const(2))
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g1 = relay.GlobalVar("g1")
g2 = relay.GlobalVar("g2")
mod[g1] = fn1
        mod = relay.transform.InferType()(mod)
mod[g2] = fn2
p = relay.var("p", "bool")
mod["main"] = relay.Function([p], relay.Call(relay.If(p, g1, g2), []))
return relay.transform.InferType()(mod)
def expected():
mod = tvm.IRModule({})
fn1 = relay.Function([], relay.const(1))
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn2 = relay.Function([], relay.const(2))
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
p = relay.var("p", "bool")
mod["main"] = relay.Function([p], relay.Call(relay.If(p, fn1, fn2), []))
return relay.transform.InferType()(mod)
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_inline_globalvar_without_args_extern_compiler():
def get_mod():
mod = tvm.IRModule({})
fn1 = relay.Function([], relay.const(1))
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn1 = fn1.with_attr("Compiler", "a")
fn2 = relay.Function([], relay.const(2))
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn2 = fn2.with_attr("Compiler", "b")
g1 = relay.GlobalVar("g1")
g2 = relay.GlobalVar("g2")
mod[g1] = fn1
mod[g2] = fn2
p = relay.var("p", "bool")
mod["main"] = relay.Function([p], relay.Call(relay.If(p, g1, g2), []))
return mod
def expected():
mod = tvm.IRModule({})
fn1 = relay.Function([], relay.const(1))
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn1 = fn1.with_attr("Compiler", "a")
fn2 = relay.Function([], relay.const(2))
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn2 = fn2.with_attr("Compiler", "b")
p = relay.var("p", "bool")
mod["main"] = relay.Function([p], relay.Call(relay.If(p, fn1, fn2), []))
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_globalvar_called_by_multiple_functions():
"""Test when only leaf call is inlined.
The call graph is like the following:
main g0
/ \ /
g1 g2(inline)
"""
def get_mod():
mod = tvm.IRModule({})
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
sb = relay.ScopeBuilder()
sb.ret(x1 + y1)
fn1 = relay.Function([x1, y1], sb.get())
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
sb1 = relay.ScopeBuilder()
sb1.ret(x2 - y2)
fn2 = relay.Function([x2, y2], sb1.get())
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
x0 = relay.var("x0", shape=(3, 5))
y0 = relay.var("y0", shape=(3, 5))
z0 = relay.var("z0", shape=(3, 5))
fn0 = relay.Function([x0, y0, z0], g2(x0, y0) + z0)
g0 = relay.GlobalVar("g0")
mod[g0] = fn0
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn1 = g1(p0, p1)
call_fn2 = g2(p2, p3)
mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
return mod
def expected():
mod = tvm.IRModule({})
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
sb = relay.ScopeBuilder()
sb.ret(x1 + y1)
fn1 = relay.Function([x1, y1], sb.get())
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
p0 = relay.var("p0", shape=(3, 5))
p1 = relay.var("p1", shape=(3, 5))
p2 = relay.var("p2", shape=(3, 5))
p3 = relay.var("p3", shape=(3, 5))
call_fn2 = p2 - p3
mod["main"] = relay.Function([p0, p1, p2, p3], g1(p0, |
p1) * call_fn2)
x0 = relay.var("x0", shape=(3, 5))
y0 = relay.var("y0", shape=(3, 5))
z0 = relay.var("z0", shape=(3, 5))
fn0 = relay.Function([x0, y0, z0], x0 - y0 + z0)
g0 = relay.GlobalVar("g0")
mod[g0] = fn0
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_entry_with_inline():
"""Test entry function with inline
The call graph is like the following:
g1(inline) g2(inline)
"""
def get_mod():
mod = tvm.IRModule({})
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
fn1 = relay.Function([x1, y1], x1 + y1)
fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
fn2 = relay.Function([x2, y2], x2 - y2)
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, get_mod(), map_free_vars=True)
def test_callee_not_inline():
"""Test entry function with inline
The call graph is like the following:
main
|
g2(inline)
|
g1
"""
def get_mod():
mod = tvm.IRModule({})
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
fn1 = relay.Function([x1, y1], x1 + y1)
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, get_mod(), map_free_vars=True)
def test_callee_not_inline_leaf_inline():
"""Test entry function with inline
The call graph is like the following:
main
|
g2(inline)
|
g1
|
g0(inline)
"""
def get_mod():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(3, 5))
y0 = relay.var("y0", shape=(3, 5))
fn0 = relay.Function([x0, y0], x0 * y0)
fn0 = fn0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g0 = relay.GlobalVar("g0")
mod[g0] = fn0
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
fn1 = relay.Function([x1, y1], x1 + g0(x1, y1))
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
return mod
def expected():
mod = tvm.IRModule({})
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
fn1 = relay.Function([x1, y1], x1 + x1 * y1)
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_callee_not_inline_leaf_inline_extern_compiler():
"""Test entry function with inline
The call graph is l |
ike the following:
main
|
g2(inline)
|
g1
|
g0(inline, external compiler)
"""
def get_mod():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(3, 5))
y0 = relay.var("y0", shape=(3, 5))
fn0 = relay.Function([x0, y0], x0 * y0)
fn0 = fn0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn0 = fn0.with_attr("Compiler", "aa")
g0 = relay.GlobalVar("g0")
mod[g0] = fn0
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
fn1 = relay.Function([x1, y1], x1 + g0(x1, y1))
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
return mod
def expected():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(3, 5))
y0 = relay.var("y0", shape=(3, 5))
fn0 = relay.Function([x0, y0], x0 * y0)
fn0 = fn0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
fn0 = fn0.with_attr("Compiler", "aa")
x1 = relay.var("x1", shape=(3, 5))
y1 = relay.var("y1", shape=(3, 5))
fn1 = relay.Function([x1, y1], x1 + fn0(x1, y1))
g1 = relay.GlobalVar("g1")
mod[g1] = fn1
x2 = relay.var("x2", shape=(3, 5))
y2 = relay.var("y2", shape=(3, 5))
fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
g2 = relay.GlobalVar("g2")
mod[g2] = fn2
return mod
mod = get_mod()
mod = relay.transform.Inline()(mod)
assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
if __name__ == "__main__":
pytest.main() |
""" Instrument test cases.
""" |
import pytest |
import tvm |
import tvm.relay
from tvm.relay |
import op
from tvm.ir.instrument |
import PassTimingInstrument, pass_instrument
import tvm.testing
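# Helper for the instrument tests below: builds a small Relay module combining
# add/subtract/multiply/divide so the instrumented passes have real work to do.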
def get_test_model():
x, y, z = [tvm.relay.var(c, shape=(3, 4), dtype="float32") for c in "xyz"]
e1 = op.add(x, y)
e2 = op.subtract(x, z)
e3 = op.multiply(e1, e1 / e2)
return tvm.IRModule.from_expr(e3 + e2)
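# PassTimingInstrument collects per-pass execution time; render() returns a textual
# profile that should mention every pass executed while the instrument was installed.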
def test_pass_timing_instrument():
pass_timing = PassTimingInstrument()
tvm.transform.PassContext.current().override_instruments([pass_timing])
mod = get_test_model()
mod = tvm.relay.transform.AnnotateSpans()(mod)
mod = tvm.relay.transform.ToANormalForm()(mod)
mod = tvm.relay.transform.InferType()(mod)
profiles = pass_timing.render()
assert "AnnotateSpans" in profiles
assert "ToANormalForm" in profiles
assert "InferType" in profiles
tvm.transform.PassContext.current().override_instruments(None)
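    # With the instruments cleared, the passes below are no longer timed,
    # so render() is expected to return an empty profile string.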
mod = get_test_model()
mod = tvm.relay.transform.AnnotateSpans()(mod)
mod = tvm.relay.transform.ToANormalForm()(mod)
mod = tvm.relay.transform.InferType()(mod)
profiles = pass_timing.render()
assert profiles == ""
instrument_definition_type = tvm.testing.parameter("decorator", "subclass")
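# A custom instrument can be written either as a plain class decorated with
# @pass_instrument or as a subclass of tvm.ir.instrument.PassInstrument;
# the parameterized test below exercises both definitions.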
def test_custom_instrument(instrument_definition_type):
class BaseTest:
def __init__(self):
self.events = []
def enter_pass_ctx(self):
self.events.append("enter ctx")
def exit_pass_ctx(self):
self.events.append("exit ctx")
def run_before_pass(self, mod, info):
self.events.append("run before " + info.name)
def run_after_pass(self, mod, info):
self.events.append("run after " + info.name)
if instrument_definition_type == "decorator":
MyTest = pass_instrument(BaseTest)
elif instrument_definition_type == "subclass": |
class MyTest(BaseTest, tvm.ir.instrument.PassInstrument):
def __init__(self):
BaseTest.__init__(self)
tvm.ir.instrument.PassInstrument.__init__(self)
mod = get_test_model()
my_test = MyTest()
with tvm.transform.PassContext(instruments=[my_test]):
mod = tvm.relay.transform.InferType()(mod)
assert (
"enter ctx"
"run before InferType"
"run after InferType"
"exit ctx" == "".join(my_test.events)
)
def test_disable_pass():
@pass_instrument
class CustomPI:
def __init__(self):
self.events = []
def should_run(self, mod, info):
if "InferType" not in info.name:
return False
return True
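        # Returning False from should_run makes the pass context skip that pass,
        # so only InferType reaches run_before_pass below.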
def run_before_pass(self, mod, info):
self.events.append(info.name)
mod = get_test_model()
custom_pi = CustomPI()
with tvm.transform.PassContext(instruments=[custom_pi]):
mod = tvm.relay.transform.AnnotateSpans()(mod)
mod = tvm.relay.transform.ToANormalForm()(mod)
mod = tvm.relay.transform.InferType()(mod)
assert "InferType" == "".join(custom_pi.events)
def test_multiple_instrument():
@pass_instrument
class SkipPass:
def __init__(self, skip_pass_name):
self.skip_pass_name = skip_pass_name
def should_run(self, mod, info):
if self.skip_pass_name in info.name:
return False
return True
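    # A pass runs only if every registered instrument's should_run returns True, so
    # these two SkipPass instances filter out AnnotateSpans and ToANormalForm.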
skip_annotate = SkipPass("AnnotateSpans")
skip_anf = SkipPass("ToANormalForm")
@pass_instrument
class PrintPassName:
def __init__(self):
self.events = []
def run_before_pass(self, mod, info):
self.events.append(info.name)
mod = get_test_model()
print_pass_name = PrintPassName()
with tvm.transform.PassContext(instruments=[skip_annotate, skip_anf, print_pass_name]):
mod = tvm.relay.transform.AnnotateSpans()(mod)
        mod = tvm.relay.transform.ToANormalForm()(mod)
mod = tvm.relay.transform.InferType()(mod)
assert "InferType" == "".join(print_pass_name.events)
def test_instrument_pass_counts():
@pass_instrument
class PassesCounter:
def __init__(self):
self.run_before_count = 0
self.run_after_count = 0
def __clear(self):
self.run_before_count = 0
self.run_after_count = 0
def enter_pass_ctx(self):
self.__clear()
def exit_pass_ctx(self):
self.__clear()
def run_before_pass(self, mod, info):
self.run_before_count = self.run_before_count + 1
def run_after_pass(self, mod, info):
self.run_after_count = self.run_after_count + 1
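    # enter_pass_ctx and exit_pass_ctx both reset the counters, so they only
    # accumulate while the PassContext is active and read back as zero once it is left.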
mod = get_test_model()
passes_counter = PassesCounter()
with tvm.transform.PassContext(instruments=[passes_counter]):
tvm.relay.build(mod, "llvm")
assert passes_counter.run_after_count != 0
assert passes_counter.run_after_count == passes_counter.run_before_count
assert passes_counter.run_before_count == 0
assert passes_counter.run_after_count == 0
def test_list_pass_configs():
configs = tvm.transform.PassContext.list_configs()
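    # list_configs() maps every registered config key to metadata, including the
    # type of the stored value.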
assert len(configs) > 0
assert "relay.backend.use_auto_scheduler" in configs.keys()
assert configs["relay.backend.use_auto_scheduler"]["type"] == "IntImm"
def test_enter_pass_ctx_exception():
events = []
@pass_instrument
class PI:
def __init__(self, id):
self.id = id
def enter_pass_ctx(self):
events.append(self.id + " enter ctx")
def exit_pass_ctx(self):
events.append(self.id + " exit ctx")
@pass_instrument |
class PIBroken(PI):
def __init__(self, id):
super().__init__(id)
def enter_pass_ctx(self):
events.append(self.id + " enter ctx")
raise RuntimeError("Just a dummy error")
pass_ctx = tvm.transform.PassContext(instruments=[PI("%1"), PIBroken("%2"), PI("%3")])
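    # If an instrument fails in enter_pass_ctx, instruments that already entered
    # successfully are rolled back via exit_pass_ctx; the failing instrument and any
    # later ones are not exited, which the recorded events below verify.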
with pytest.raises(tvm.error.TVMError) as cm:
with pass_ctx:
pass
assert "Just a dummy error" in str(cm.execption)
assert "%1 enter ctx" "%2 enter ctx" "%1 exit ctx" == "".join(events)
cur_pass_ctx = tvm.transform.PassContext.current()
assert pass_ctx != cur_pass_ctx
assert not cur_pass_ctx.instruments
def test_enter_pass_ctx_exception_global():
@pass_instrument
class PIBroken:
def enter_pass_ctx(self):
raise RuntimeError("Just a dummy error")
cur_pass_ctx = tvm.transform.PassContext.current()
with pytest.raises(tvm.error.TVMError) as cm:
cur_pass_ctx.override_instruments([PIBroken()])
assert "Just a dummy error" in str(cm.exception)
assert not cur_pass_ctx.instruments
def test_exit_pass_ctx_exception():
events = []
@pass_instrument
class PI:
def __init__(self, id):
self.id = id
def exit_pass_ctx(self):
events.append(self.id + " exit ctx")
@pass_instrument |
class PIBroken(PI):
def __init__(self, id):
super().__init__(id)
def exit_pass_ctx(self):
events.append(self.id + " exit ctx")
raise RuntimeError("Just a dummy error")
pass_ctx = tvm.transform.PassContext(instruments=[PI("%1"), PIBroken("%2"), PI("%3")])
with pytest.raises(tvm.error.TVMError) as cm:
with pass_ctx:
pass
assert "Just a dummy error" in str(cm.exception)
assert "%1 exit ctx" "%2 exit ctx" == "".join(events)
cur_pass_ctx = tvm.transform.PassContext.current()
assert pass_ctx != cur_pass_ctx
assert not cur_pass_ctx.instruments
def test_exit_pass_ctx_exception_global():
@pass_instrument
class PIBroken:
def exit_pass_ctx(self):
raise RuntimeError("Just a dummy error")
cur_pass_ctx = tvm.transform.PassContext.current()
with pytest.raises(tvm.error.TVMError) as cm:
cur_pass_ctx.override_instruments([PIBroken()])
cur_pass_ctx.override_instruments([PIBroken()])
assert "Just a dummy error" in str(cm.exception)
assert not cur_pass_ctx.instruments
def test_pass_exception():
events = []
@pass_instrument
class PI:
def enter_pass_ctx(self):
events.append("enter_pass_ctx")
def exit_pass_ctx(self):
events.append("exit_pass_ctx")
def should_run(self, mod, info):
events.append("should_run")
return True
def run_before_pass(self, mod, info):
events.append("run_before_pass")
def run_after_pass(self, mod, info):
events.append("run_after_pass")
@tvm.transform.module_pass(opt_level=2)
def transform(mod, ctx):
events.append("transform pass")
raise RuntimeError("Just a dummy error")
return mod
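    # When the pass body itself raises, run_after_pass is skipped for every
    # instrument, but exit_pass_ctx still runs when the PassContext is torn down.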
mod = get_test_model()
with pytest.raises(tvm.error.TVMError) as cm:
with tvm.transform.PassContext(instruments=[PI()]):
mod = transform(mod)
assert "Just a dummy |
error" in str(cm.exception)
assert (
"enter_pass_ctx"
"should_run"
"run_before_pass"
"transform pass"
"exit_pass_ctx" == "".join(events)
)
def test_should_run_exception():
events = []
@pass_instrument
class PI:
def __init__(self, id):
self.id = id
def enter_pass_ctx(self):
events.append(self.id + " enter_pass_ctx")
def exit_pass_ctx(self):
events.append(self.id + " exit_pass_ctx")
def should_run(self, mod, info):
events.append(self.id + " should_run")
raise RuntimeError("Just a dummy error")
return True
def run_before_pass(self, mod, info):
events.append(self.id + " run_before_pass")
def run_after_pass(self, mod, info):
events.append(self.id + " run_after_pass")
@tvm.transform.module_pass(opt_level=2)
def transform(mod, ctx):
events.append("transform pass")
return mod
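    # An exception in should_run aborts before the pass body runs; the remaining
    # should_run hooks are skipped and every instrument is still exited.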
mod = get_test_model()
with pytest.raises(tvm.error.TVMError) as cm:
with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
mod = transform(mod)
assert "Just a dummy error" in str(cm.exception)
assert (
"%1 enter_pass_ctx"
"%2 enter_pass_ctx"
"%1 should_run"
"%1 exit_pass_ctx"
"%2 exit_pass_ctx" == "".join(events)
)
def test_run_before_exception():
events = []
@pass_instrument
class PI:
def __init__(self, id):
self.id = id
def enter_pass_ctx(self):
events.append(self.id + " enter_pass_ctx")
def exit_pass_ctx(self):
events.append(self.id + " exit_pass_ctx")
def should_run(self, mod, info):
events.append(self.id + " should_run")
return True
def run_before_pass(self, mod, info):
events.append(self.id + " run_before_pass")
raise RuntimeError("Just a dummy error")
        def run_after_pass(self, mod, info):
events.append(self.id + " run_after_pass")
@tvm.transform.module_pass(opt_level=2)
def transform(mod, ctx):
events.append("transform pass")
return mod
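    # Likewise, an exception in run_before_pass prevents the pass body from running
    # while exit_pass_ctx is still called on all instruments.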
mod = get_test_model()
with pytest.raises(tvm.error.TVMError) as cm:
with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
mod = transform(mod)
assert "Just a dummy error" in str(cm.exception)
assert (
"%1 enter_pass_ctx"
"%2 enter_pass_ctx"
"%1 should_run"
"%2 should_run"
"%1 run_before_pass"
"%1 exit_pass_ctx"
"%2 exit_pass_ctx" == "".join(events)
)
def test_run_after_exception():
events = []
@pass_instrument
class PI:
def __init__(self, id):
self.id = id
def enter_pass_ctx(self):
events.append(self.id + " enter_pass_ctx")
def exit_pass_ctx(self):
events.append(self.id + " exit_pass_ctx")
def should_run(self, mod, info):
events.append(self.id + " should_run")
return True
def run_before_pass(self, mod, info):
events.append(self.id + " run_before_pass")
def run_after_pass(self, mod, info):
events.append(self.id + " run_after_pass")
raise RuntimeError("Just a dummy error")
@tvm.transform.module_pass(opt_level=2)
def transform(mod, ctx):
events.append("transform pass")
return mod
x, y = [tvm.relay.var(c, shape=(3, 4), dtype="float32") for c in "xy"]
mod = tvm.IRModule.from_expr(tvm.relay.add(x, y))
with pytest.raises(tvm.error.TVMError) as cm:
with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
mod = transform(mod)
assert "Just a dummy error" in str(cm.exception)
assert (
"%1 enter_pass_ctx"
"%2 enter_pass_ctx"
"%1 should_run"
"%2 should_run"
"%1 run_before_pass"
"%2 run_before_pass"
"transfor |
m pass"
"%1 run_after_pass"
"%1 exit_pass_ctx"
"%2 exit_pass_ctx" == "".join(events)
)
def test_instrument_call_sequence():
events = []
@pass_instrument
class PI:
def __init__(self, id):
self.id = id
def enter_pass_ctx(self):
events.append(self.id + " enter_pass_ctx")
def exit_pass_ctx(self):
events.append(self.id + " exit_pass_ctx")
def should_run(self, mod, info):
events.append(" " + self.id + " should_run")
return True
def run_before_pass(self, mod, info):
events.append(" " + self.id + " run_before_pass")
def run_after_pass(self, mod, info):
events.append(" " + self.id + " run_after_pass")
@tvm.transform.module_pass(opt_level=2)
def transform1(mod, ctx):
events.append(" transform1 pass")
return mod
@tvm.transform.module_pass(opt_level=2)
def transform2(mod, ctx):
events.append(" transform2 pass")
return mod
mod = get_test_model()
with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
mod = transform1(mod)
mod = transform2(mod)
assert (
"%1 enter_pass_ctx"
"%2 enter_pass_ctx"
" %1 should_run"
" %2 should_run"
" %1 run_before_pass"
" %2 run_before_pass"
" transform1 pass"
" %1 run_after_pass"
" %2 run_after_pass"
" %1 should_run"
" %2 should_run"
" %1 run_before_pass"
" %2 run_before_pass"
" transform2 pass"
" %1 run_after_pass"
" %2 run_after_pass"
"%1 exit_pass_ctx"
"%2 exit_pass_ctx" == "".join(events)
) |
import numpy as np |
import pytest |
import tvm
from tvm |
import te
from tvm |
import relay
from tvm.relay |
import transform
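# LambdaLift hoists nested function definitions (including closures) into fresh
# global functions, so each test below simply counts the globals left in the module.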
def test_basic():
mod = tvm.IRModule()
x2 = relay.var("x2", shape=(10, 5))
y2 = relay.var("y2", shape=(1, 5))
level2_func = relay.Function([x2, y2], relay.op.add(x2, y2))
x1 = relay.var("x1", shape=(10, 5))
y1 = relay.var("y1", shape=(1, 5))
level1_func = relay.Function([x1, y1], level2_func(x1, y1))
mod["main"] = level1_func
mod = relay.transform.InferType()(mod)
new_mod = transform.LambdaLift()(mod)
assert len(new_mod.functions) == 2
def test_closure():
mod = tvm.IRModule()
x = relay.var("x", shape=(2,))
y = relay.var("y", shape=(2,))
inner_func = relay.Function([x], x + y)
outer_func = relay.Function([y], inner_func)
clo = outer_func(relay.ones(shape=(2,), dtype="float32"))
mod["main"] = relay.Function([], relay.Call(clo, [relay.zeros(shape=(2,), dtype="float32")]))
mod = relay.transform.InferType()(mod)
new_mod = transform.LambdaLift()(mod)
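    # After lifting, the module holds main plus two lifted functions
    # (the outer function and its inner closure), hence three globals.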
assert len(new_mod.functions) == 3
def test_recursive():
mod = tvm.IRModule()
x = relay.var("x", shape=(2,))
i = relay.var("i", shape=(), dtype="int32")
s = relay.var("s", shape=(2,))
cond = i < relay.const(10, dtype="int32")
loop = relay.var("while_loop")
sb = relay.scope_builder.ScopeBuilder()
with sb.if_scope(cond):
ii = i + relay.const(1, dtype="int32")
ss = s + x
sb.ret(loop(ii, ss))
with sb.else_scope():
sb.ret(s)
func = relay.Function([i, s], sb.get())
ret = relay.Let(
loop, func, loop(relay.const(0, dtype="int32"), relay.zeros(shape=(2,), dtype="float32"))
)
mod["main"] = relay.Function([x], ret)
mod = relay.transform.InferType()(mod)
new_mod = transform.LambdaLift()(mod)
assert len(new_mod.functions) == 2
if __name__ == "__main__":
pytest.main() |
import numpy as np |
import tvm
from tvm |
import relay
from tvm.relay |
import create_executor, transform
from tvm.relay.testing |
import rand, run_infer_type |
import tvm.testing
from tvm.testing |
import assert_allclose
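# LazyGradientInit defers materialization of zero/one gradient tensors; the tests
# below check that transformed functions keep their original types and numerics.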
def test_tc():
"""Simple testcase, check that transformation typechecks."""
mod = tvm.IRModule()
shape = (20, 20)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x1 = relay.var("x1", t)
x2 = relay.var("x2", t)
y = relay.Function([x1, x2], (x1 - x2) * x2)
mod["main"] = y
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
assert mod["main"].checked_type == relay.FuncType([t, t], t)
def test_add():
"""Simple add testcase. Check types and semantic equivalence."""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.Function([x], x + x)
mod["main"] = y
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
y = mod["main"]
assert mod["main"].checked_type == relay.FuncType([t], t)
x = rand(dtype, *shape)
y = create_executor(mod=mod).evaluate(y)(x)
assert_allclose(y.numpy(), x.numpy() + x.numpy())
def test_add_tuple():
"""Add elements of tuple. Check types and semantic equivalence."""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
tensor_type = relay.TensorType(shape, dtype)
t = relay.TupleType([tensor_type, tensor_type])
x = relay.var("x", t)
y = relay.Function([x], relay.TupleGetItem(x, 0) + relay.TupleGetItem(x, 1))
mod["main"] = y
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
mod = tvm.transform.PrintIR(show_meta_data=True)(mod)
y = mod["main"]
assert mod["main"].checked_type == relay.FuncType([t], tensor_type)
x = (rand(dtype, *shape), rand(dtype, *shape))
y = create_executor(mod=mod).evaluate(y)(x)
assert_allclose(y.numpy(), x[0].numpy() + x[1].numpy())
def test_mult():
"""Simple multiplication testcase. Check types and semantic equivalence."""
mod = tvm.IRModule()
shape = (15, 15)
dtype = "float32"
    t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.Function([x], x * x)
mod["main"] = y
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
y = mod["main"]
assert mod["main"].checked_type == relay.FuncType([t], t)
x = rand(dtype, *shape)
y = create_executor(mod=mod).evaluate(y)(x)
assert_allclose(y.numpy(), x.numpy() * x.numpy())
def test_ret_tuple():
"""Test tuple return type. Check types and semantic equivalence."""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], relay.Tuple([x, x * relay.const(2.0)]))
func = run_infer_type(func)
mod["main"] = func
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
func = mod["main"]
assert mod["main"].checked_type == relay.FuncType([t], relay.TupleType([t, t]))
x = rand(dtype, *shape)
y = create_executor(mod=mod).evaluate(func)(x)
assert_allclose(y[0].numpy(), x.numpy())
assert_allclose(y[1].numpy(), x.numpy() * 2.0)
def test_add_broadcast():
"""Test adding matrices of different size. Check types and semantic equivalence."""
mod = tvm.IRModule()
shape1 = (3, 4, 1)
shape2 = (1, 5)
dtype = "float32"
t1 = relay.TensorType(shape1, dtype)
t2 = relay.TensorType(shape2, dtype)
x1 = relay.var("x1", t1)
x2 = relay.var("x2", t2)
func = relay.Function([x1, x2], x1 + x2)
func = run_infer_type(func)
mod["main"] = func
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
func = mod["main"]
x1_np = rand(dtype, *shape1).numpy()
x2_np = rand(dtype, *shape2).numpy()
expected_forward = x1_np + x2_np
expected_forward_type = relay.TensorType(expected_forward.shape, dtype)
assert mod["main"].checked_type == relay.FuncType([t1, t2], expected_forward_type)
forward = create_executor(mod=mod).evaluate(func)(x1_np, x2_np)
    assert_allclose(forward.numpy(), expected_forward)
def test_reverse_ad_identity():
"""Simple test with reverse mode ad."""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x)
func = run_infer_type(func)
back_func = transform.gradient(func)
back_func = run_infer_type(back_func)
mod["main"] = back_func
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
back_func = mod["main"]
assert mod["main"].checked_type == relay.FuncType(
[t], relay.TupleType([t, relay.TupleType([t])])
)
x = rand(dtype, *shape)
(forward), (grad,) = create_executor(mod=mod).evaluate(back_func)(x)
assert_allclose(forward.numpy(), x.numpy())
assert_allclose(grad.numpy(), np.ones_like(x.numpy()))
def test_multivar_reverse_ad():
"""Simple test with multivariate reverse mode ad."""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.var("y", t)
func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
func = run_infer_type(func)
back_func = transform.gradient(func)
back_func = run_infer_type(back_func)
mod["main"] = back_func
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
back_func = mod["main"]
assert mod["main"].checked_type == relay.FuncType(
[t, t], relay.TupleType([t, relay.TupleType([t, t])])
)
x = rand(dtype, *shape)
y = rand(dtype, *shape)
(forward), (grad_x, grad_y,) = create_executor(mod=mod).evaluate(
back_func
)(x, y)
assert_allclose(forward.numpy(), x.numpy() * y.numpy())
assert_allclose(grad_x.numpy(), y.numpy())
assert_allclose(grad_y.numpy(), x.numpy())
def test_partial_eval():
"""Test transformation following reverse mode ad and PartialEval"""
mod = tvm.IRModule()
    shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
func = relay.Function([], relay.const(np.ones(shape, dtype)))
func = run_infer_type(func)
back_func = transform.gradient(func)
back_func = run_infer_type(back_func)
mod["main"] = back_func
mod = transform.InferType()(mod)
back_func = mod["main"]
transform.PartialEvaluate()(mod)
def test_after_partial_eval():
"""Test transformation following reverse mode ad and PartialEval"""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.var("y", t)
func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
func = run_infer_type(func)
back_func = transform.gradient(func)
back_func = run_infer_type(back_func)
mod["main"] = back_func
back_func = mod["main"]
seq = tvm.transform.Sequential(
[
transform.PartialEvaluate(),
transform.InferType(),
transform.LazyGradientInit(),
transform.InferType(),
transform.DeadCodeElimination(),
transform.InferType(),
]
)
mod = seq(mod)
assert mod["main"].checked_type == relay.FuncType(
[t, t], relay.TupleType([t, relay.TupleType([t, t])])
)
x = rand(dtype, *shape)
y = rand(dtype, *shape)
(forward), (grad_x, grad_y,) = create_executor(mod=mod).evaluate(
back_func
)(x, y)
assert_allclose(forward.numpy(), x.numpy() * y.numpy())
assert_allclose(grad_x.numpy(), y.numpy())
assert_allclose(grad_y.numpy(), x.numpy())
def test_before_partial_eval():
"""Test transformation before PartialEval"""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.var("y", t)
func = relay.Function([x, y], x * y)
func = run_infer_type(func)
back_func = transform.gradient(func)
back_func = run_infer_type(back_func) |
mod["main"] = back_func
seq = tvm.transform.Sequential(
[
transform.LazyGradientInit(),
transform.PartialEvaluate(),
transform.InferType(),
transform.DeadCodeElimination(),
transform.InferType(),
]
)
mod = seq(mod)
back_func = mod["main"]
assert mod["main"].checked_type == relay.FuncType(
[t, t], relay.TupleType([t, relay.TupleType([t, t])])
)
x = rand(dtype, *shape)
y = rand(dtype, *shape)
(forward), (grad_x, grad_y,) = create_executor(mod=mod).evaluate(
back_func
)(x, y)
assert_allclose(forward.numpy(), x.numpy() * y.numpy())
assert_allclose(grad_x.numpy(), y.numpy())
assert_allclose(grad_y.numpy(), x.numpy())
def test_zeros():
"""Simple test using "zeros" op"""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.Function([x], x + relay.zeros(shape, dtype))
mod["main"] = y
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
y = mod["main"]
assert mod["main"].checked_type == relay.FuncType([t], t)
x = rand(dtype, *shape)
y = create_executor(mod=mod).evaluate(y)(x)
assert_allclose(y.numpy(), x.numpy())
def test_ones():
"""Simple test using "ones" op"""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.Function([x], x + relay.ones(shape, dtype))
mod["main"] = y
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
y = mod["main"]
assert mod["main"].checked_type == relay.FuncType([t], t)
x = rand(dtype, *shape)
y = create_executor(mod=mod).evaluate(y)(x)
assert_allclose(y.numpy(), x.numpy() + np.ones_like(x.numpy()))
def test_zeros_like():
"""Simple test using "zeros_like" op"""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32" |
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.Function([x], x + relay.zeros_like(x))
mod["main"] = y
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
y = mod["main"]
assert mod["main"].checked_type == relay.FuncType([t], t)
x = rand(dtype, *shape)
y = create_executor(mod=mod).evaluate(y)(x)
assert_allclose(y.numpy(), x.numpy())
def test_ones_like():
"""Simple test using "ones_like" op"""
mod = tvm.IRModule()
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.Function([x], x + relay.ones_like(x))
mod["main"] = y
mod = transform.InferType()(mod)
mod = transform.LazyGradientInit()(mod)
y = mod["main"]
assert mod["main"].checked_type == relay.FuncType([t], t)
x = rand(dtype, *shape)
y = create_executor(mod=mod).evaluate(y)(x)
assert_allclose(y.numpy(), x.numpy() + np.ones_like(x.numpy()))
if __name__ == "__main__":
tvm.testing.main() |
"""Test legalize pass""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import relay
from tvm.contrib |
import graph_executor
from tvm.relay |
import transform, analysis
from tvm.relay.testing.temp_op_attr |
import TempOpAttr
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
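# The Legalize pass invokes an op's FTVMLegalize attribute with (attrs, inputs, types);
# returning a new expression rewrites the call, while returning None keeps the original op.
# TempOpAttr installs the attribute only for the duration of each test.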
def test_legalize():
"""Test directly replacing an operator with a new one"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def legalize_conv2d(attrs, inputs, types):
data, weight = inputs
weight = relay.multiply(weight, relay.const(2.0, "float32"))
return relay.nn.conv2d(data, weight, **attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
relay.multiply(weight, relay.const(2.0, "float32")),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_legalize_none():
"""Test doing nothing by returning 'None'"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
y = relay.nn.global_max_pool2d(x)
y = relay.Function([x], y)
return y
called = [False]
def legalize_conv2d(attrs, inputs, types):
called[0] = True
return None
with TempOpAttr("nn.global_max_pool2d", "FTVMLega |
lize", legalize_conv2d):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(before(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
assert called[0]
def test_legalize_multiple_ops():
"""Test directly replacing an operator with a new one"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def legalize_conv2d(attrs, inputs, types):
data, weight = inputs
weight = relay.multiply(weight, relay.const(2.0, "float32"))
return relay.nn.conv2d(data, weight, **attrs)
def legalize_relu(attrs, inputs, types):
data = inputs[0]
add = relay.add(tvm.relay.const(0, "float32"), data)
return relay.nn.relu(add)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
relay.multiply(weight, relay.const(2.0, "float32")),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.add(tvm.relay.const(0, "float32"), y)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
with TempOpAttr("nn.relu", "FTVMLegalize", legalize_relu):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_legalize_multi_input():
"""Test directly replacing an operator with a new one"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
y = relay.var("y", shape=(1, 64, 56, 20)) |
z = relay.var("z", shape=(1, 64, 56, 10))
func = relay.concatenate([x, y, z], axis=3)
func = relay.Function([x, y, z], func)
return func
def legalize_concatenate(attrs, inputs, types):
assert len(inputs) == 1
assert isinstance(inputs[0], tvm.relay.expr.Tuple)
assert len(types) == 2
assert isinstance(types[0], tvm.relay.ty.TupleType)
assert isinstance(types[1], tvm.relay.ty.TensorType)
return None
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
y = relay.var("y", shape=(1, 64, 56, 20))
z = relay.var("z", shape=(1, 64, 56, 10))
func = relay.concatenate([x, y, z], axis=3)
func = relay.Function([x, y, z], func)
return func
with TempOpAttr("concatenate", "FTVMLegalize", legalize_concatenate):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
if __name__ == "__main__":
test_legalize()
test_legalize_none()
test_legalize_multiple_ops()
test_legalize_multi_input() |
"""Test legalize pass""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import topi
from tvm |