import pytest
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.prelude import Prelude
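# These tests exercise relay.transform.RemoveUnusedFunctions, which prunes
# global functions that are unreachable from the module's entry functions
# (by default just "main").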
def test_remove_all_prelude_functions():
mod = tvm.IRModule()
p = Prelude(mod)
x = relay.var("x", shape=(1, 16))
mod["main"] = relay.Function([x], x)
mod = relay.transform.RemoveUnusedFunctions()(mod)
l = set([x[0].name_hint for x in mod.functions.items()])
assert l == set(["main"])
def test_remove_all_prelude_functions_but_referenced_functions():
mod = tvm.IRModule()
p = Prelude(mod)
x = relay.var("x", shape=(1, 16))
id_func = relay.Function([x], x)
id_name = relay.GlobalVar("id_func")
mod[id_name] = id_func
mod["main"] = relay.Function([x], id_name(x))
mod = relay.transform.RemoveUnusedFunctions()(mod)
l = set([x[0].name_hint for x in mod.functions.items()])
assert l == set(["id_func", "main"])
def test_keep_only_referenced_prelude_functions():
mod = tvm.IRModule()
p = Prelude(mod)
_, cons, nil = p.mod.get_type("List")
hd = p.mod.get_global_var("hd")
tl = p.mod.get_global_var("tl")
l = nil()
for i in [4, 3, 2, 1, 0]:
l = cons(relay.const(i), l)
body = hd(tl(tl(l)))
mod["main"] = relay.Function([], body)
mod = relay.transform.RemoveUnusedFunctions()(mod)
l = set([x[0].name_hint for x in mod.functions.items()])
assert l == set(["tl", "hd", "main"])
def test_multiple_entry_functions():
mod = tvm.IRModule()
p = Prelude(mod)
_, cons, nil = p.mod.get_type("List")
hd = p.mod.get_global_var("hd")
tl = p.mod.get_global_var("tl")
l = nil()
for i in [4, 3, 2, 1, 0]:
l = cons(relay.const(i), l)
body = hd(tl(tl(l)))
mod["main1"] = relay.Function([], body)
x = relay.var("x", shape=(1, 16))
id_func = relay.Function([x], x)
id_name = relay.GlobalVar("id_func")
mod[id_name] = id_func
mod["main2"] = relay.Function([x], id_name(x))
mod = relay.transform.RemoveUnusedFunctions(["main1", "main2"])(mod)
l = set([x[0].name_hint for x in mod.functions.items()])
assert l == set(["tl", "hd", "main2", "id_func", "main1"])
def test_globalvar_as_call_arg():
mod = tvm.IRModule()
p = Prelude(mod)
tensor_array = p.get_global_var("tensor_array", "int32")
tensor1 = p.get_ctor(p.get_name("tensor_t", "int32"), "tensor1", "int32")
write = p.get_global_var("tensor_array_write", "int32")
stack = p.get_global_var("tensor_array_stack", "int32")
v = relay.var("v")
init_tensor_array = tensor_array(relay.const(3))
tensor_array1 = write(init_tensor_array, relay.const(0), tensor1(v))
tensor_array2 = stack(tensor_array1)
mod["main"] = relay.Function([v], tensor_array2)
mod = relay.transform.RemoveUnusedFunctions()(mod)
l = set([x[0].name_hint for x in mod.functions.items()])
assert "tensor_array_int32" in l
def test_call_globalvar_without_args():
def get_mod():
mod = tvm.IRModule({})
fn1 = relay.Function([], relay.const(1))
fn2 = relay.Function([], relay.const(2))
g1 = relay.GlobalVar("g1")
g2 = relay.GlobalVar("g2")
mod[g1] = fn1
mod[g2] = fn2
p = relay.var("p", "bool")
mod["main"] = relay.Function([p], relay.Call(relay.If(p, g1, g2), []))
return mod
mod = get_mod()
ref_mod = get_mod()
mod = relay.transform.RemoveUnusedFunctions()(mod)
assert tvm.ir.structural_equal(mod, ref_mod, map_free_vars=True)
if __name__ == "__main__":
pytest.main()
import pytest
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass, run_infer_type
import numpy as np
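# The tests below exercise relay.transform.SimplifyExpr, a collection of
# pattern rewrites: fusing consecutive reshapes and casts, cancelling inverse
# transposes/layout transforms, folding elementwise ops against
# constant-filled tensors, and similar algebraic identities.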
def test_simplify_reshape():
def before():
x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
y = relay.nn.conv2d(x, w, padding=(1, 1))
y = relay.reshape(y, newshape=(1, 16, -1))
y = relay.reshape(y, newshape=(4, 8, -1, 16))
y = relay.reverse_reshape(y, newshape=(32, 0, -1))
return relay.Function([x, w], y)
def expected():
x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
y = relay.nn.conv2d(x, w, padding=(1, 1))
y = relay.reshape(y, newshape=(32, 16, 16))
return relay.Function([x, w], y)
def symbolic():
b = tvm.te.size_var("b")
x = relay.var("x", shape=(b, 16, 16, 16), dtype="float32")
w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
y = relay.nn.conv2d(x, w, padding=(1, 1))
y = relay.reshape(y, newshape=(1, 16, -1))
y = relay.reshape(y, newshape=(4, 8, -1, 16))
y = relay.reverse_reshape(y, newshape=(32, 0, -1))
return relay.Function([x, w], y)
z = before()
zz = run_opt_pass(z, transform.SimplifyExpr())
after = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
z = symbolic()
zz = run_opt_pass(z, transform.SimplifyExpr())
after = run_opt_pass(symbolic(), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
def test_simplify_transpose():
def before1():
x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
y = relay.transpose(x, axes=[0, 2, 3, 1])
y = relay.layout_transform(y, "NHWC", "HWCN")
y = relay.transpose(y, axes=[3, 0, 1, 2])
return relay.Function([x], y)
def expected1():
x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
y = relay.transpose(x, axes=[0, 2, 3, 1])
return relay.Function([x], y)
def before2():
x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
y = relay.nn.relu(x)
y = relay.transpose(y, axes=[0, 2, 3, 1])
y = relay.transpose(y, axes=[1, 2, 3, 0])
y = relay.transpose(y, axes=[3, 2, 0, 1])
return relay.Function([x], y)
def expected2():
x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
y = relay.nn.relu(x)
return relay.Function([x], y)
def before3():
x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
y = relay.nn.relu(x)
y = relay.transpose(y)
y = relay.transpose(y)
y = relay.transpose(y, axes=[0, 2, -1, 1])
y = relay.transpose(y)
y = relay.transpose(y)
return relay.Function([x], y)
def expected3():
x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
y = relay.nn.relu(x)
y = relay.transpose(y, axes=[0, 2, 3, 1])
return relay.Function([x], y)
def before4():
"""
Simplify transpose->layout_transform and its inverse.
Input:
NHWC -> NCHW -> NCHW4c -> op -> NCHW4c -> NCHW -> NHWC
Simplified:
NHWC -> NCHW4c -> op -> NCHW4c -> NHWC
"""
x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")
y = relay.transpose(x, axes=[0, 3, 1, 2])
y = relay.layout_transform(y, "NCHW", "NCHW4c")
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW4c", "NCHW")
y = relay.transpose(y, axes=[0, 2, 3, 1])
return relay.Function([x], y)
def expected4():
x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")
y = relay.layout_transform(x, "NHWC", "NCHW4c")
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW4c", "NHWC")
return relay.Function([x], y)
def before5():
"""
Simplify layout_transform->layout_transform and its inverse.
Input:
NHWC -> NCHW -> NCHW4c -> op -> NCHW4c -> NCHW -> NHWC
Simplified:
NHWC -> NCHW4c -> op -> NCHW4c -> NHWC
"""
x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")
y = relay.layout_transform(x, "NHWC", "NCHW")
y = relay.layout_transform(y, "NCHW", "NCHW4c")
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW4c", "NCHW")
y = relay.layout_transform(y, "NCHW", "NHWC")
return relay.Function([x], y)
def expected5():
x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")
y = relay.layout_transform(x, "NHWC", "NCHW4c")
y = relay.nn.relu(y)
y = relay.layout_transform(y, "NCHW4c", "NHWC")
return relay.Function([x], y)
def before6():
"""
Remove trivial layout_transform->layout_transform.
Input:
NCHW -> NHWC -> NCHW -> op
Simplified:
NHWC -> op
"""
x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
y = relay.layout_transform(x, "NCHW", "NHWC")
y = relay.layout_transform(y, "NHWC", "NCHW")
y = relay.nn.relu(y)
return relay.Function([x], y)
def expected6():
x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
y = relay.nn.relu(x)
return relay.Function([x], y)
def before7():
"""
Remove trivial layout_transform->layout_transform.
Input:
NCHW4c -> NCHW8c -> NCHW4c -> op
Simplified:
NCHW4c -> op
"""
x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
y = relay.layout_transform(x, "NCHW4c", "NCHW8c")
y = relay.layout_transform(y, "NCHW8c", "NCHW4c")
y = relay.nn.relu(y)
return relay.Function([x], y)
def expected7():
x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
y = relay.nn.relu(x)
return relay.Function([x], y)
def before8():
"""
Simplify layout_transform->layout_transform with rank contraction and expansion.
Input:
NCHW4c -> NCHW -> NCHW8c -> op
Simplified:
NCHW4c -> NCHW8c -> op
"""
x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
y = relay.layout_transform(x, "NCHW4c", "NCHW")
y = relay.layout_transform(y, "NCHW", "NCHW8c")
y = relay.nn.relu(y)
return relay.Function([x], y)
def expected8():
x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
y = relay.layout_transform(x, "NCHW4c", "NCHW8c")
y = relay.nn.relu(y)
return relay.Function([x], y)
def before9():
"""
Remove trivial layout_transform->layout_transform.
Input:
NCHW -> NCHW4c -> NCHW -> op
Simplified:
NCHW -> op
"""
x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
y = relay.layout_transform(x, "NCHW", "NCHW4c")
y = relay.layout_transform(y, "NCHW4c", "NCHW")
y = relay.nn.relu(y)
return relay.Function([x], y)
def expected9():
x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
y = relay.nn.relu(x)
return relay.Function([x], y)
def before10():
"""
Simplify layout_transform->layout_transform without rank change to transpose.
Input:
NCHW -> NHWC -> CHWN -> op
Simplified:
NCHW -> CHWN -> op
"""
x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
y = relay.layout_transform(x, "NCHW", "NHWC")
y = relay.layout_transform(y, "NHWC", "CHWN")
y = relay.nn.relu(y)
return relay.Function([x], y)
def expected10():
x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
y = relay.transpose(x, axes=[1, 2, 3, 0])
y = relay.nn.relu(y)
return relay.Function([x], y)
for before, expected in [
[before1(), expected1()],
[before2(), expected2()],
[before3(), expected3()],
[before4(), expected4()],
[before5(), expected5()],
[before6(), expected6()],
[before7(), expected7()],
[before8(), expected8()],
[before9(), expected9()],
[before10(), expected10()],
]:
after = run_opt_pass(before, transform.SimplifyExpr())
expected = run_opt_pass(expected, transform.InferType())
assert tvm.ir.structural_equal(after, expected), "\nafter: {} \nexpected: {}".format(
after, expected
)
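# Elementwise folding: x + 0 and x * 1 (including the zeros/ones/full and
# *_like variants) reduce to x; when the constant-filled operand has an
# incompatible shape, the expression must be left unchanged, as the
# shape * 2 cases below check.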
def test_simplify_full_elementwise():
def validate(shape, value, dtype):
def before_left(x, elem_op, full):
return elem_op(full, x)
def after_left(x, elem_op, value):
if elem_op == relay.add and value == 0:
return x
elif elem_op == relay.multiply and (value == 1 or (value > 1 and dtype == "bool")):
return x
return elem_op(relay.const(value, dtype), x)
def before_right(x, elem_op, full):
return elem_op(x, full)
def after_right(x, elem_op, value):
if elem_op in [relay.add, relay.subtract] and value == 0:
return x
elif elem_op in [relay.multiply, relay.divide] and (
value == 1 or (value > 1 and dtype == "bool")
):
return x
return elem_op(x, relay.const(value, dtype))
x = relay.var("x", shape=shape, dtype=dtype)
elem_ops = [relay.add, relay.multiply, relay.subtract, relay.divide]
full_ops = []
if value == 0:
full_ops.append(relay.zeros(shape, dtype))
full_ops.append(relay.zeros_like(x))
if value == 1:
full_ops.append(relay.ones(shape, dtype))
full_ops.append(relay.ones_like(x))
else:
full_ops.append(relay.full(relay.const(value, dtype), shape))
full_ops.append(relay.full_like(x, relay.const(value, dtype)))
for op in elem_ops:
for full in full_ops:
z = before_left(x, op, full)
zz = run_opt_pass(z, transform.SimplifyExpr())
after = run_opt_pass(after_left(x, op, value), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
z = before_right(x, op, full)
zz = run_opt_pass(z, transform.SimplifyExpr())
after = run_opt_pass(after_right(x, op, value), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
full_ops = []
if value == 0:
full_ops.append(relay.zeros(shape * 2, dtype))
if value == 1:
full_ops.append(relay.ones(shape * 2, dtype))
else:
full_ops.append(relay.full(relay.const(value, dtype), shape * 2))
for op in elem_ops:
for full in full_ops:
z = before_left(x, op, full)
zz = run_opt_pass(z, transform.SimplifyExpr())
after = run_opt_pass(before_left(x, op, full), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
z = before_right(x, op, full)
zz = run_opt_pass(z, transform.SimplifyExpr())
after = run_opt_pass(before_right(x, op, full), transform.InferType())
assert tvm.ir.structural_equal(zz, after)
for shape in [[10], [10, 10], [10, 10, 10]]:
for dtype in ["float32", "int32", "bool"]:
for value in [0, 1, 2]:
validate(shape, value, dtype)
def test_eliminate_identity():
def check(x, y=None, do_nothing=False):
expected = run_infer_type(x)
if do_nothing:
actual = run_opt_pass(x, transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
else:
assert y is not None
actual = run_opt_pass(y, transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
shape = [2, 3, 4]
dtype = "float32"
x = relay.var("x", shape=shap |
e, dtype=dtype)
x = run_opt_pass(x, transform.InferType())
for (op, op_like, id_op, const) in [
(relay.zeros, relay.zeros_like, relay.add, relay.const(0, dtype)),
(relay.ones, relay.ones_like, relay.multiply, relay.const(1, dtype)),
]:
check(x, id_op(op_like(x), x))
check(x, id_op(op(shape, dtype), x))
check(x, id_op(const, x))
check(x, id_op(op(shape[1:], dtype), x))
check(x, id_op(x, op_like(x)))
check(x, id_op(x, op(shape, dtype)))
check(x, id_op(x, const))
check(x, id_op(x, op(shape[1:], dtype)))
check(id_op(x, op([2] + shape, dtype)), do_nothing=True)
check(id_op(op([2] + shape, dtype), x), do_nothing=True)
for (op, op_like, id_op, const) in [
(relay.zeros, relay.zeros_like, relay.subtract, relay.const(0, dtype)),
(relay.ones, relay.ones_like, relay.divide, relay.const(1, dtype)),
]:
check(x, id_op(x, op_like(x)))
check(x, id_op(x, const))
check(x, id_op(x, op(shape, dtype)))
check(x, id_op(x, op(shape[1:], dtype)))
check(id_op(x, op([2] + shape, dtype)), do_nothing=True)
check(id_op(const, x), id_op(op(shape, dtype), x))
check(id_op(const, x), id_op(op_like(x), x))
def test_simplify_same_cast():
dtype = "int32"
data = relay.var("data", shape=(3, 4, 5), dtype=dtype)
expr1 = relay.cast(data, dtype)
dtype_like = relay.var("dtype_like", shape=(2, 2, 2), dtype=dtype)
expr2 = relay.cast_like(data, dtype_like)
expected = run_infer_type(data)
actual1 = run_opt_pass(expr1, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual1, expected)
actual2 = run_opt_pass(expr2, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual2, expected)
def test_simplify_consecutive_cast():
x = relay.var("x", shape=(3, 4, 5), dtype="int8")
y = relay.var("y", shape=(3, 4), dtype="int64")
z = relay.var("z", shape=(3,), dtype="float32")
expr1 = relay.cast(x, "in |
t16")
expr2 = relay.cast(expr1, "int32")
expr3 = relay.cast_like(expr2, y)
expr4 = relay.cast_like(expr3, z)
actual1 = run_opt_pass(expr2, relay.transform.SimplifyExpr())
expected = run_infer_type(relay.cast(x, "int32"))
assert tvm.ir.structural_equal(actual1, expected)
actual2 = run_opt_pass(expr3, relay.transform.SimplifyExpr())
expected = run_infer_type(relay.cast(x, "int64"))
assert tvm.ir.structural_equal(actual2, expected)
actual3 = run_opt_pass(expr4, relay.transform.SimplifyExpr())
expected = run_infer_type(relay.cast(x, "float32"))
assert tvm.ir.structural_equal(actual3, expected)
x = relay.var("x", shape=(3, 4, 5), dtype="float32")
y = relay.var("y", shape=(3, 4), dtype="float32")
expr1 = relay.cast(x, "int32")
expr2 = relay.cast_like(expr1, y)
actual = run_opt_pass(expr2, relay.transform.SimplifyExpr())
expected = run_infer_type(relay.cast(expr1, "float32"))
assert tvm.ir.structural_equal(actual, expected)
x = relay.var("x", shape=(3, 4), dtype="int64")
expr1 = relay.cast(x, "bool")
expr2 = relay.cast(expr1, "int32")
actual = run_opt_pass(expr2, relay.transform.SimplifyExpr())
expected = run_infer_type(expr2)
assert tvm.ir.structural_equal(actual, expected)
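# The concretize rewrites replace *_like ops (reshape_like, zeros_like,
# ones_like, full_like, collapse_sum_like, broadcast_to_like, cast_like) with
# their static counterparts once the shape or dtype of the "like" argument is
# fully known.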
def test_concretize_reshape_like():
data = relay.var("data", shape=(2, 3, 4), dtype="float32")
shape_like = relay.var("shape_like", shape=(6, 2, 2), dtype="float32")
expr = relay.reshape_like(data, shape_like)
expected = run_infer_type(relay.reshape(data, (6, 2, 2)))
actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
def test_concretize_reshape_like_attrs():
data = relay.var("data", shape=(2, 3, 4), dtype="float32")
shape_like = relay.var("shape_like", shape=(6, 2, 2), dtype="float32")
expr = relay.reshape_like(data, shape_like, lhs_begin=2, rhs_begin=1)
expected = run_infer_type(relay.reshape(data, (2, 3, 2, 2)))
actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
def test_concretize_zeros_like():
dtype = "int32"
shape_like = relay.var("shape_like", shape=(3, 4, 5), dtype=dtype)
expr = relay.zeros_like(shape_like)
expected = run_infer_type(relay.zeros((3, 4, 5), dtype))
actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
def test_concretize_ones_like():
dtype = "int32"
shape_like = relay.var("shape_like", shape=(3, 4, 5), dtype=dtype)
expr = relay.ones_like(shape_like)
expected = run_infer_type(relay.ones((3, 4, 5), dtype))
actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
def test_concretize_full_like():
dtype = "int32"
shape_like = relay.var("shape_like", shape=(3, 4, 5), dtype=dtype)
fill_value = relay.var("fill", relay.TensorType((), "float32"))
expr = relay.full_like(shape_like, fill_value)
expected = run_infer_type(relay.full(fill_value, (3, 4, 5), dtype))
actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
def test_concretize_collapse_sum_like():
data = relay.var("data", shape=(3, 3, 3), dtype="float32")
shape_like = relay.var("shape_like", shape=(3,), dtype="float32")
expr = relay.collapse_sum_like(data, shape_like)
expected = run_infer_type(relay.collapse_sum_to(data, (3,)))
actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
def test_concretize_broadcast_to_like():
data = relay.var("data", shape=(3,), dtype="float32")
shape_like = relay.var("shape_like", shape=(3, 3, 3), dtype="float32")
expr = relay.broadcast_to_like(data, shape_like)
expected = run_infer_type(relay.broadcast_to(data, (3, 3, 3)))
actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
def test_concretize_cast_like():
dim_any = tvm.tir.Any()
data = relay.var("data", shape=(3, dim_any, 5), dtype="float32")
dtype_like = relay.var("dtype_like", shape=(dim_any, 3, 3), dtype="int32")
expr = relay.cast_like(data, dtype_like)
expected = run_infer_type(relay.cast(data, "int32"))
actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
def test_concretize_multiple():
x = relay.var("x", shape=(2, 3), dtype="float32")
y = relay.var("y", shape=(3,), dtype="float32")
l = x + y
dl = relay.ones_like(l)
dx = relay.zeros_like(x)
dy = relay.zeros_like(y)
dx = dx + relay.collapse_sum_like(dl, dx)
dy = dy + relay.collapse_sum_like(dl, dy)
ret = relay.Tuple([dx, dy])
dl_c = relay.ones((2, 3), "float32")
dx_c = relay.collapse_sum_to(dl_c, (2, 3))
dy_c = relay.collapse_sum_to(dl_c, (3,))
ret_c = relay.Tuple([dx_c, dy_c])
expected = run_infer_type(ret_c)
actual = run_opt_pass(ret, relay.transform.SimplifyExpr())
assert tvm.ir.structural_equal(actual, expected)
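# SimplifyExpr also folds chains of constant adds/multiplies around a single
# variable into the canonical form x * c_mul + c_add, regardless of operand
# order, as test_simplify_mul_add checks below.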
def test_simplify_mul_add():
def check_simple_fold(origin_exprs, expect_expr):
for origin_expr in origin_exprs:
simple_expr = run_opt_pass(origin_expr, transform.SimplifyExpr())
assert tvm.ir.structural_equal(simple_expr, expect_expr)
n = 32
c1_val = np.random.uniform(size=n).astype("float32")
c2_val = np.random.uniform(size=n).astype("float32")
c3_val = np.random.uniform(size=n).astype("float32")
x = relay.var("x", shape=(n,), dtype="float32")
c1 = relay.const(c1_val)
c2 = relay.const(c2_val)
c3 = relay.const(c3_val)
origin_exprs = [
x + c1 + c2,
c1 + x + c2,
]
expect_expr = x + relay.const(c1_val + c2_val)
check_simple_fold(origin_exprs, expect_expr)
origin_exprs = [
x * c1 * c2,
c1 * x * c2,
]
expect_expr = x * relay.const(c1_val * c2_val)
check_simple_fold(origin_exprs, expect_expr)
origin_exprs = [
(x + c1) * c2,
(c1 + x) * c2,
c2 * (x + c1),
c2 * (c1 + x),
]
expect_expr = x * c2 + relay.const(c1_val * c2_val)
check_simple_fold(origin_exprs, expect_expr)
origin_exprs = [
(x + c1) * c2 + c3,
(c1 + x) * c2 + c3,
c2 * (x + c1) + c3,
c2 * (c1 + x) + c3,
c3 + (x + c1) * c2,
c3 + (c1 + x) * c2,
c3 + c2 * (x + c1),
c3 + c2 * (c1 + x),
]
expect_expr = x * c2 + relay.const(c1_val * c2_val + c3_val)
check_simple_fold(origin_exprs, expect_expr)
origin_exprs = [
(x * c1 + c2) * c3,
(c1 * x + c2) * c3,
(c2 + x * c1) * c3,
(c2 + c1 * x) * c3,
c3 * (x * c1 + c2),
c3 * (c1 * x + c2),
c3 * (c2 + x * c1),
c3 * (c2 + c1 * x),
]
expect_expr = x * relay.const(c1_val * c3_val) + relay.const(c2_val * c3_val)
check_simple_fold(origin_exprs, expect_expr)
def test_simplify_rsqrt():
shape = (32, 1, 1)
x = relay.var("x", shape=shape, dtype="float32")
def before(c):
return relay.const(c) / relay.sqrt(x)
def expected(c):
if c == 1:
return relay.rsqrt(x)
else:
return relay.const(c) * relay.rsqrt(x)
for c in [1.0, 2.0, 2.5]:
opt = run_opt_pass(before(c), transform.SimplifyExpr())
after = run_opt_pass(expected(c), transform.InferType())
assert tvm.ir.structural_equal(opt, after)
def test_simplify_dq_argmax():
shape = (4, 32, 1, 1)
x = relay.var("x", shape=shape, dtype="int8")
def before():
y = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(0))
return relay.op.argmax(y, axis=1)
def expected():
return relay.op.argmax(x, axis=1)
opt = run_opt_pass(before(), transform.SimplifyExpr())
after = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(opt, after)
def test_simplify_dq_argmin():
shape = (4, 32, 1, 1)
x = relay.var("x", shape=shape, dtype="int8")
def before():
y = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(0))
return relay.op.argmin(y, axis=1)
def expected():
return relay.op.argmin(x, axis=1)
opt = run_opt_pass(before(), transform.SimplifyExpr())
after = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(opt, after)
def test_simplify_dq_argsort():
shape = (4, 32, 1, 1)
x = relay.var("x", shape=shape, dtype="int8")
def before():
y = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(0))
return relay.op.argsort(y, axis=1)
def expected():
return relay.op.argsort(x, axis=1)
opt = run_opt_pass(before(), transform.SimplifyExpr())
after = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(opt, after)
def test_simplify_clip_cast():
x = relay.var("x", shape=(4, 8), dtype="int32")
def before():
clip = relay.clip(x, a_min=0.0, a_max=255.0)
cast = relay.cast(clip, "uint8")
return relay.cast(cast, "int32")
def expected():
return relay.clip(x, a_min=0.0, a_max=255.0)
opt = run_opt_pass(before(), transform.SimplifyExpr())
ref = run_infer_type(expected())
assert tvm.ir.structural_equal(opt, ref)
def test_simplify_cast_clip():
x = relay.var("x", shape=(4, 8), dtype="int32")
def before():
cast = relay.cast(x, "uint8")
return relay.clip(cast, a_min=0.0, a_max=255.0)
def expected():
return relay.cast(x, "uint8")
opt = run_opt_pass(before(), transform.SimplifyExpr())
ref = run_infer_type(expected())
assert tvm.ir.structural_equal(opt, ref)
def test_simplify_add():
x = relay.var("x", shape=(1, 3, 100, 100), dtype="float32")
def before():
return relay.add(x, x)
def expected():
s = relay.const(2.0)
return relay.multiply(x, s)
opt = run_opt_pass(before(), transform.SimplifyExpr())
ref = run_infer_type(expected())
assert tvm.ir.structural_equal(opt, ref)
if __name__ == "__main__":
pytest.main([__file__])
from tvm.ir import IRModule, structural_equal
from tvm import relay as rly
from tvm.relay.transform import SimplifyInference, InferType
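# SimplifyInference rewrites inference-time ops: nn.batch_norm is unrolled
# into an explicit scale-and-shift expression (simple_bn below is the
# reference expansion) and nn.dropout is removed.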
def test_simplify_batchnorm(dtype="float32"):
def simple_bn(x, gamma, beta, moving_mean, moving_var, axis=1, epsilon=1e-5, shape=None):
scale = rly.multiply(
rly.const(1, dtype) / rly.sqrt(moving_var + rly.const(epsilon, dtype)), gamma
)
shift = rly.add(rly.multiply(rly.negative(moving_mean), scale), beta)
num_newaxis = len(shape) - (axis + 1)
if num_newaxis:
scale = rly.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
shift = rly.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
return x * scale + shift
def check(dim, axis, nstep):
eps = 0.01
ttype1 = rly.TensorType(tuple(10 for i in range(dim)), dtype)
ttype2 = rly.TensorType((10,), dtype)
x = rly.var("x", ttype1)
beta = rly.var("beta", ttype2)
gamma = rly.var("gamma", ttype2)
moving_var = rly.var("moving_var", ttype2)
moving_mean = rly.var("moving_mean", ttype2)
y1, y2 = x, x
for _ in range(nstep):
y1, _, _ = rly.nn.batch_norm(
y1 + rly.const(1, dtype),
gamma,
beta,
moving_mean,
moving_var,
epsilon=eps,
axis=axis,
)
y1 = rly.nn.dropout(y1)
y2 = simple_bn(
y2 + rly.const(1, dtype),
gamma,
beta,
moving_mean,
moving_var,
epsilon=eps,
axis=axis,
shape=ttype1.shape,
)
mod = IRModule.from_expr(y1)
simplify = SimplifyInference()
mod = InferType()(mod)
mod = simplify(mod)
y1 = mod["main"].body
assert structural_equal(y1, y2, map_free_vars=True)
check(2, 1, 1)
check(4, 1, 1)
check(4, 0, 3)
if __name__ == "__main__":
test_simplify_batchnorm(dtype="float32")
test_simplify_batchnorm(dtype="float16")
import numpy as np
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.testing import run_infer_type, create_workload
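# SplitArgs(n) breaks a concatenate whose input tuple has more fields than a
# target's max_function_args into several smaller concatenates (separated by
# stop_fusion), so each generated kernel stays within the argument limit.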
def run_opt_pass(expr, opt_pass):
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = relay.transform.InferType()(mod)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_split_concat_metal():
shape = (1, 1, 1, 3)
dtype = "float32"
axis = 1
inputs = []
for i in range(100):
inputs.append(relay.var("p{}".format(i), shape=shape, dtype=dtype))
def before():
inp = relay.Tuple(inputs)
return relay.op.concatenate(inp, axis)
def expected():
limit = tvm.target.Target("metal").max_function_args - 1
splitNum = int(len(inputs) / limit)
if len(inputs) % limit > 0:
splitNum += 1
splitted = []
for i in range(splitNum):
startIdx = i * limit
argsCount = min(limit, len(inputs) - startIdx)
args = []
for j in range(argsCount):
args.append(inputs[j + startIdx])
t = relay.Tuple(args)
concat = relay.op.concatenate(t, axis)
splitted.append(relay.annotation.stop_fusion(concat))
inp = relay.Tuple(splitted)
return relay.op.concatenate(inp, axis)
res = run_opt_pass(before(), transform.SplitArgs(tvm.target.Target("metal").max_function_args))
exp = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(res, exp)
def test_split_concat_cuda():
shape = (1, 1, 1, 3)
dtype = "float32"
axis = 1
inputs = []
for i in range(100):
inputs.append(relay.var("p{}".format(i), shape=shape, dtype=dtype))
def before():
inp = relay.Tuple(inputs)
return relay.op.concatenate(inp, axis)
def expected():
inp = relay.Tuple(inputs)
return relay.op.concatenate(inp, axis)
res = run_opt_pass(before(), transform.SplitArgs(tvm.target.Target("cuda").max_function_args))
exp = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(res, exp)
if __name__ == "__main__":
test_split_concat_metal()
test_split_concat_cuda()
import pytest
import sys
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay.analysis import detect_feature
from tvm.relay import op, create_executor, transform
from tvm.relay.prelude import Prelude
from tvm.relay.testing import count
from tvm.relay.analysis import Feature
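# ToANormalForm converts expressions into A-normal form: every intermediate
# value is bound by a let, making evaluation order and sharing explicit.
# detect_feature reports Feature.fLet for expressions in this form.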
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
dev = tvm.device("llvm", 0)
result = create_executor(mod=mod, device=dev, target="llvm").evaluate(expr)
np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
def test_explicit_bound():
x = relay.const(1)
y = op.add(x, x)
z = op.add(y, y)
f = relay.Function([], op.add(z, z))
assert not Feature.fLet in detect_feature(f)
anf = run_opt_pass(f, transform.ToANormalForm())
assert Feature.fLet in detect_feature(anf)
check_eval(f(), 8.0)
check_eval(anf(), 8.0)
def test_order():
z = relay.const(3)
y = relay.const(2)
x = relay.const(1)
val = x + y * z
check_eval(val, 7.0)
anf = run_opt_pass(val, [transform.ToANormalForm(), transform.InferType()])
a = relay.Var("a", relay.IncompleteType())
b = relay.Var("b", relay.IncompleteType())
c = relay.Var("c", relay.IncompleteType())
d = relay.Var("d", relay.IncompleteType())
e = relay.Var("e", relay.IncompleteType())
expected_output = e
expected_output = relay.Let(e, a + d, expected_output)
expected_output = relay.Let(d, b * c, expected_output)
expected_output = relay.Let(c, z, expected_output)
expected_output = relay.Let(b, y, expected_output)
expected_output = relay.Let(a, x, expected_output)
expected_output = run_opt_pass(expected_output, transform.InferType())
assert tvm.ir.structural_equal(anf, expected_output)
def test_if():
cond = relay.const(True)
x = relay.If(cond, relay.const(2), relay.const(3))
anf = run_opt_pass(x, [transform.ToANormalForm(), transform.InferType()])
a = relay.Var("a", relay.Incomp |
leteType())
b = relay.Var("b", relay.IncompleteType())
c = relay.Var("c", relay.IncompleteType())
d = relay.Var("d", relay.IncompleteType())
true_branch = relay.Let(a, relay.const(2), a)
false_branch = relay.Let(b, relay.const(3), b)
expected_output = relay.If(c, true_branch, false_branch)
expected_output = relay.Let(d, expected_output, d)
expected_output = relay.Let(c, cond, expected_output)
expected_output = run_opt_pass(expected_output, transform.InferType())
assert tvm.ir.structural_equal(anf, expected_output)
def test_let_as_subexpr():
def on_cpu(x):
return relay.annotation.on_device(x, tvm.device("cpu"), constrain_result=True)
x = relay.Var("x", relay.IncompleteType())
c = relay.const(1)
l = relay.Let(x, on_cpu(c + c), x)
body = l * l
anf = run_opt_pass(body, [transform.ToANormalForm(), transform.InferType()])
v0 = relay.Var("v0", relay.IncompleteType())
v1 = relay.Var("v1", relay.IncompleteType())
v2 = relay.Var("v2", relay.IncompleteType())
expected_output = relay.Let(
v0,
on_cpu(c),
relay.Let(
x,
on_cpu(v0 + v0),
relay.Let(v1, x, relay.Let(v2, v1 * v1, v2)),
),
)
expected_output = run_opt_pass(expected_output, transform.InferType())
tvm.ir.assert_structural_equal(anf, expected_output)
def test_recursion():
"""
Program:
let f(n: i64) -> i64 = {
m = (n * 2)
if (n == 0) {
return m;
} else {
return m + f(n - 1);
}
}
f(5);
"""
mod = tvm.IRModule()
i64 = relay.TensorType((), "int64")
f = relay.GlobalVar("f")
n = relay.Var("n", i64)
m = n * relay.const(2, "int64")
funcbody = relay.If(
relay.equal(n, relay.const(0, "int64")), m, m + f(n - relay.const(1, "int64"))
)
value = relay.Function([n], funcbody, i64, [])
mod[f] = value
check_eval(f(relay.const(5, "int64")), 30.0, mod=mod)
old_f = mod[f]
mod = transform.ToANormalForm()(mod)
f = mod[f]
check_eval(f(relay.const(5, "int64")), 30.0, mod=mod)
def test_ref():
i = relay.Var("i")
iv = relay.Var("iv")
u = relay.Var("u")
uv = relay.Var("uv")
body = relay.add(iv, uv)
body = relay.Let(uv, relay.RefRead(i), body)
body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)
body = relay.Let(iv, relay.RefRead(i), body)
body = relay.Let(i, relay.RefCreate(relay.const(1)), body)
check_eval(body, 3)
opt_body = run_opt_pass(body, transform.ToANormalForm())
check_eval(opt_body, 3)
def test_nat_add():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, z, s = p.mod.get_type("nat")
add = p.mod.get_global_var("nat_add")
dev = tvm.device("llvm", 0)
intrp = create_executor(mod=mod, device=dev, target="llvm")
assert mod[add].checked_type == relay.FuncType([nat(), nat()], nat())
assert count(p, intrp.evaluate(add(s(z()), s(z())))) == 2
expr = add(s(z()), s(z()))
f = relay.GlobalVar("f")
mod[f] = relay.Function([], expr)
mod = transform.ToANormalForm()(mod)
expr = mod["f"]
assert count(p, intrp.evaluate(expr.body)) == 2
assert Feature.fLet in detect_feature(mod[add])
def test_let():
x = relay.Var("x")
y = relay.Var("y")
d = relay.const(4.0, "float32")
body = relay.Let(y, x, x + y)
body = relay.Let(x, d, body)
check_eval(body, 8)
opt_body = run_opt_pass(body, transform.ToANormalForm())
check_eval(opt_body, 8)
def test_function():
t = relay.TensorType((), "float32")
x = relay.Var("x", t)
f = relay.Function([x], x + x)
d = relay.const(4.0, "float32")
anf_f = run_opt_pass(f, transform.ToANormalForm())
assert isinstance(anf_f, relay.Function)
check_eval(f(d), 8)
check_eval(anf_f(d), 8)
def test_gradient_if():
x = relay.var("a", shape=(1, 16))
y = relay.var("y", shape=(1, 16))
cond = relay.var("cond", shape=(), dtype="uint1")
net = relay.If(cond, x, x)
net = relay.add(x, net)
net = relay.Function([cond, x, y], net)
mod = tvm.IRModule.from_expr(net)
mod = relay.transform.ToANormalForm()(mod)
mod = relay.transform.InferType()(mod)
mod["main"] = relay.transform.gradient(mod["main"], mode="higher_order")
mod = relay.transform.ToANormalForm()(mod)
if __name__ == "__main__":
tvm.testing.main()
import pytest
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import detect_feature
from tvm.relay import op, create_executor, transform
from tvm.relay.prelude import Prelude
from tvm.relay.testing import count, create_workload
from tvm.relay.analysis import Feature
from tvm.relay.analysis import check_basic_block_normal_form
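# ToBasicBlockNormalForm is a relaxed variant of ANF: a value is let-bound
# only when it is shared across basic blocks (e.g. used in both branches of
# an If); values used within a single block remain inline.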
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
dev = tvm.device("llvm", 0)
result = create_executor(mod=mod, device=dev, target="llvm").evaluate(expr)
np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
def test_no_explicit_bind():
x = relay.const(1)
y = op.add(x, x)
z = op.add(y, y)
f = relay.Function([], op.add(z, z))
"""
fn () {
%0 = add(1, 1);
%1 = add(%0, %0);
add(%1, %1)
}
"""
assert not Feature.fLet in detect_feature(f)
bblock = run_opt_pass(f, transform.ToBasicBlockNormalForm())
assert Feature.fLet not in detect_feature(bblock)
check_eval(f(), 8.0)
check_eval(bblock(), 8.0)
check_basic_block_normal_form(bblock)
def test_top_level_nested_if():
x = relay.var("x", shape=(), dtype="bool")
y = relay.var("y", shape=(), dtype="float32")
z = relay.var("z", shape=(), dtype="float32")
cond_t = relay.const(True)
cond_f = relay.const(False)
one = relay.const(1, dtype="float32")
three = relay.const(3, dtype="float32")
y2 = relay.add(y, y)
z2 = relay.add(z, z)
true_branch = relay.If(cond_t, relay.add(z2, y2), relay.add(three, y2))
false_branch = relay.If(cond_f, z2, one)
body = relay.If(x, true_branch, false_branch)
"""
free_var %x: bool
if (%x) {
if (True) {
free_var %z: float32
%0 = add(%z, %z);
free_var %y: float32
%1 = add(%y, %y);
add(%0, %1)
} else {
add(3f, %1)
}
} else {
if (False) {
%0
} else {
1f
}
}
"""
def expected():
x = relay.var("x", shape=(), dtype="bool")
y = relay.var("y", shape=(), dtype="float32")
z = relay.var("z", shape=(), dtype="float32")
cond_t = relay.const(True)
cond_f = relay.const(False)
one = relay.const(1, dtype="float32")
three = relay.const(3, dtype="float32")
y2 = relay.var("y2")
z2 = relay.var("z2")
true_branch = relay.If(cond_t, relay.add(z2, y2), relay.add(three, y2))
true_branch = relay.Let(y2, relay.add(y, y), true_branch)
false_branch = relay.If(cond_f, z2, one)
body = relay.If(x, true_branch, false_branch)
body = relay.Let(z2, relay.add(z, z), body)
return body
bblock = run_opt_pass(body, [transform.ToBasicBlockNormalForm(), transform.InferType()])
"""
free_var %z: float32
let %x: float32 = add(%z, %z) /* ty=float32 */;
free_var %x1: bool
if (%x1) {
free_var %y: float32
let %x2: float32 = add(%y, %y) /* ty=float32 */;
if (True /* ty=bool */) {
add(%x, %x2) /* ty=float32 */
} else {
add(3f /* ty=float32 */, %x2) /* ty=float32 */
}
} else {
if (False /* ty=bool */) {
%x
} else {
1f /* ty=float32 */
}
}
"""
expected_output = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(bblock, expected_output, map_free_vars=True)
def test_nested_if():
x = relay.var("x", shape=(), dtype="bool")
y = relay.var("y", shape=(), dtype="float32")
cond_t = relay.const(True)
cond_f = relay.const(False)
one = relay.const(1, dtype="float32")
two = relay.const(2, dtype="float32")
three = relay.const(3, dtype="float32")
y2 = relay.add(y, y)
true_branch = relay.If(cond_t, y2, relay.add(three, y2))
false_branch = relay.If(cond_f, two, one)
body = relay.If(x, true_branch, false_branch)
"""
free_var %x: bool
if (%x) {
if (True) {
free_var %y: float32
%0 = add(%y, %y);
%0
} else {
add(3f, %0)
}
} else {
if (False) {
2f
} else {
1f
}
}
"""
def expected():
x = relay.var("x", shape=(), dtype="bool")
y = relay.var("y", shape=(), dtype="float32")
cond_t = relay.const(True)
cond_f = relay.const(False)
one = relay.const(1, dtype="float32")
two = relay.const(2, dtype="float32")
three = relay.const(3, dtype="float32")
y2 = relay.var("y2")
true_branch = relay.If(cond_t, y2, relay.add(three, y2))
true_branch = relay.Let(y2, relay.add(y, y), true_branch)
false_branch = relay.If(cond_f, two, one)
body = relay.If(x, true_branch, false_branch)
return body
bblock = run_opt_pass(body, [transform.ToBasicBlockNormalForm(), transform.InferType()])
"""
free_var %x: bool
if (%x) {
free_var %y: float32
let %x1: float32 = add(%y, %y) /* ty=float32 */;
if (True /* ty=bool */) {
%x1
} else {
add(3f /* ty=float32 */, %x1) /* ty=float32 */
}
} else {
if (False /* ty=bool */) {
2f /* ty=float32 */
} else {
1f /* ty=float32 */
}
}
"""
expected_output = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(bblock, expected_output, map_free_vars=True)
check_basic_block_normal_form(bblock)
def test_recursion():
"""
Program:
let f(n: i64) -> i64 = {
m = (n * 2)
if (n == 0) {
return m;
} else {
return m + f(n - 1);
}
}
f(5);
"""
mod = tvm.IRModule()
i64 = relay.TensorType((), "int64")
f = relay.GlobalVar("f")
n = relay.Var("n", i64)
m = n * relay.const(2, "int64")
cond = relay.equal(n, relay.const(0, "int64"))
false_branch = m + f(n - relay.const(1, "int64"))
funcbody = relay.If(cond, m, false_branch)
value = relay.Function([n], funcbody, i64, [])
mod[f] = value
check_eval(f(relay.const(5, "int64")), 30.0, mod=mod)
old_f = mod[f]
mod = transform.ToBasicBlockNormalForm()(mod)
f = mod[f]
check_eval(f(relay.const(5, "int64")), 30.0, mod=mod)
check_basic_block_normal_form(f)
def test_ref():
i = relay.Var("i")
iv = relay.Var("iv")
u = relay.Var("u")
uv = relay.Var("uv")
body = relay.add(iv, uv)
body = relay.Let(uv, relay.RefRead(i), body)
body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)
body = relay.Let(iv, relay.RefRead(i), body)
body = relay.Let(i, relay.RefCreate(relay.const(1)), body)
check_eval(body, 3)
opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
check_eval(opt_body, 3)
check_basic_block_normal_form(opt_body)
def test_nat_add():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, z, s = p.mod.get_type("nat")
add = p.mod.get_global_var("nat_add")
dev = tvm.device("llvm", 0)
assert mod[add].checked_type == relay.FuncType([nat(), nat()], nat())
assert (
count(p, create_executor(mod=mod, device=dev, target="llvm").evaluate(add(s(z()), s(z()))))
== 2
)
expr = add(s(z()), s(z()))
f = relay.GlobalVar("f")
mod[f] = relay.Function([], expr)
mod = transform.InferType()(mod)
mod = transform.ToBasicBlockNormalForm()(mod)
opt_expr = mod["f"]
assert (
count(p, create_executor(mod=mod, device=dev, target="llvm").evaluate(opt_expr.body)) == 2
)
assert not Feature.fLet in detect_feature(mod[add])
check_basic_block_normal_form(opt_expr)
def test_let():
def test_let1():
x = relay.Var("x")
c = relay.const(4.0, "float32")
body = relay.Let(x, c, x)
body = run_opt_pass(body, transform.InferType())
"""
let %x: float32 = 4f /* ty=float32 */;
%x
"""
opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
assert tvm.ir.structural_equal(body, opt_body)
check_basic_block_normal_form(opt_body)
def test_let1_1():
x = relay.Var("y")
d = relay.const(4.0, "float32")
body = relay.Let(x, d, relay.add(x, x))
body = run_opt_pass(body, transform.InferType())
opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
assert tvm.ir.structural_equal(body, opt_body)
check_basic_block_normal_form(opt_body)
def test_let2():
x = relay.Var("x")
y = relay.Var("y")
d = relay.const(4.0, "float32")
body = relay.Let(y, x, x)
body = relay.Let(x, d, body)
body = run_opt_pass(body, transform.InferType())
check_eval(body, 4)
def expected():
x = relay.Var("x")
y = relay.Var("y")
d = relay.const(4.0, "float32")
body = relay.Let(y, x, y)
body = relay.Let(x, d, body)
return body
opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
expected_body = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(opt_body, expected_body)
check_basic_block_normal_form(opt_body)
def test_let3():
x = relay.Var("x")
y = relay.Var("y")
z = relay.Var("z")
c = relay.const(3.0, "float32")
d = relay.const(4.0, "float32")
body = relay.Let(z, x + y, x + z)
body = relay.Let(x, d, body)
body = relay.Let(y, c, body)
body = run_opt_pass(body, transform.InferType())
opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
assert tvm.ir.structural_equal(body, opt_body)
check_basic_block_normal_form(opt_body)
test_let1()
test_let1_1()
test_let2()
test_let3()
def test_function():
t = relay.TensorType((), "float32")
x = relay.Var("x", t)
f = relay.Function([x], x + x)
d = relay.const(4.0, "float32")
bblock = run_opt_pass(f, transform.ToBasicBlockNormalForm())
assert isinstance(bblock, relay.Function)
check_eval(f(d), 8)
check_eval(bblock(d), 8)
check_basic_block_normal_form(bblock)
def test_gradient_if():
x = relay.var("a", shape=(1, 16))
y = relay.var("y", shape=(1, 16))
cond = relay.var("cond", shape=(), dtype="uint1")
net = relay.If(cond, x, x)
net = relay.add(x, net)
net = relay.Function([cond, x, y], net)
mod = tvm.IRModule.from_expr(net)
mod = relay.transform.ToBasicBlockNormalForm()(mod)
mod = relay.transform.InferType()(mod)
net_grad = relay.transform.gradient(mod["main"], mode="higher_order")
mod["main"] = net_grad
mod_grad = relay.transform.ToBasicBlockNormalForm()(mod)
check_basic_block_normal_form(mod_grad["main"])
check_basic_block_normal_form(mod["main"])
def test_if():
def if_expr(x):
"""
free_var %x: float32
%0 = equal(%x, 2f);
if (%0) {
%1 = add(%x, 1f);
multiply(%1, 2f)
} else {
multiply(%1, 1f)
}
"""
one = relay.const(1, dtype="float32")
two = relay.const(2, dtype="float32")
v1 = relay.add(x, one)
v2 = relay.equal(x, two)
true_branch = relay.multiply(v1, two)
false_branch = relay.multiply(v1, one)
body = relay.If(v2, true_branch, false_branch)
return body
def expected_if_expr(x):
"""
free_var %x: float32
let %v1: float32 = add(%x, 1f /* ty=float32 */) /* ty=float32 */;
%0 = equal(%x, 2f /* ty=float32 */) /* ty=bool */;
if (%0) {
multiply(%v1, 2f /* ty=float32 */) /* ty=float32 */
} else {
multiply(%v1, 1f /* ty=float32 */) /* ty=float32 */
}
"""
one = relay.const(1, dtype="float32")
two = relay.const(2, dtype="float32")
v1 = relay.var("v1")
v2 = relay.equal(x, two)
true_branch = relay.multiply(v1, two)
false_branch = relay.multiply(v1, one)
body = relay.If(v2, true_branch, false_branch)
body = relay.Let(v1, relay.add(x, one), body)
return body
x = relay.var("x", shape=(), dtype="float32")
body = if_expr(x)
expected_body = expected_if_expr(x)
bblock = run_opt_pass(body, [transform.ToBasicBlockNormalForm(), transform.InferType()])
expected_bblock = run_opt_pass(expected_body, transform.InferType())
assert tvm.ir.structural_equal(bblock, expected_bblock, map_free_vars=True)
check_basic_block_normal_form(bblock)
func = relay.Function([x], body)
expected_func = relay.Function([x], expected_body)
bblock = run_opt_pass(func, [transform.ToBasicBlockNormalForm(), transform.InferType()])
expected_bblock = run_opt_pass(expected_func, transform.InferType())
assert tvm.ir.structural_equal(bblock, expected_bblock)
check_basic_block_normal_form(bblock)
def test_higher_order_return():
x = relay.var("x", shape=(1,), dtype="float32")
y = relay.var("y", shape=(1,), dtype="float32")
z = relay.var("z", shape=(1,), dtype="float32")
x2 = relay.add(x, x)
func_a = relay.Function([y], relay.add(x2, y))
func_b = relay.Function([z], relay.add(x2, z))
body = relay.Tuple([func_a, func_b])
body = relay.Function([x], body)
"""
fn (%x: Tensor[(1), float32]) {
%1 = fn (%y: Tensor[(1), float32]) {
%0 = add(%x, %x);
add(%0, %y)
};
%2 = fn (%z: Tensor[(1), float32]) {
add(%0, %z)
};
(%1, %2)
}
"""
bblock = run_opt_pass(body, transform.ToBasicBlockNormalForm())
check_basic_block_normal_form(bblock)
def test_higher_order_nested():
x = relay.var("x", dtype="float32", shape=(1,))
s = relay.var("s", dtype="float32", shape=(1,))
shared = relay.add(s, s)
func_true = relay.Function([x], relay.add(x, shared))
choice_t = relay.FuncType([], relay.scalar_type("bool"))
f = relay.Var("f", choice_t)
z = relay.Var("z")
body = relay.If(f(), func_true, relay.Function([z], relay.add(z, shared)))
top = relay.Function([f, s], body)
"""
fn (%f: fn () -> bool, %s: Tensor[(1), float32]) {
%0 = %f();
if (%0) {
fn (%x: Tensor[(1), float32]) {
%1 = add(%s, %s);
add(%x, %1)
}
} else {
fn (%z) {
add(%z, %1)
}
}
}
"""
bblock = run_opt_pass(top, transform.ToBasicBlockNormalForm())
check_basic_block_normal_form(bblock)
def test_immutability():
simple_net = relay.nn.conv2d(
data=relay.var("data", relay.TensorType((1, 3, 224, 224), "float32")),
weight=relay.var("weight"),
kernel_size=(5, 5),
channels=3,
padding=(1, 1),
)
simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
mod, _ = create_workload(simple_net)
old_mod = mod
with tvm.transform.PassContext(opt_level=4):
with tvm.target.Target("llvm"):
seq = tvm.transform.Sequential(passes=[transform.ToBasicBlockNormalForm()], opt_level=4)
new_mod = seq(mod)
assert old_mod.astext() == mod.astext()
assert old_mod.astext() != new_mod.astext()
if __name__ == "__main__":
pytest.main([__file__])
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.analysis import detect_feature
from tvm.relay.transform import to_cps, un_cps
from tvm.relay.analysis import Feature
from tvm.relay.prelude import Prelude
from tvm.relay.testing import make_nat_expr, rand, run_infer_type, run_opt_pass
from tvm.relay import create_executor
from tvm.relay import transform
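# to_cps converts a function into continuation-passing style, threading an
# explicit continuation argument through every call; un_cps reverses it.
# Round-tripping through both should preserve evaluation results.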
def test_id():
x = relay.var("x", shape=[])
id = run_infer_type(relay.Function([x], x))
id_cps = run_infer_type(to_cps(id))
def test_double():
t = relay.TypeVar("t")
x = relay.var("x", t)
f = relay.var("f", relay.FuncType([t], t))
double = run_infer_type(relay.Function([f, x], f(f(x)), t, [t]))
double_cps = run_infer_type(to_cps(double))
def test_recursion():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat_iterate = p.mod.get_global_var("nat_iterate")
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
double = relay.Function([x], x + x)
i = relay.var("i", t)
func = relay.Function([i], nat_iterate(double, make_nat_expr(p, 3))(i))
mod["main"] = func
mod = relay.transform.InferType()(mod)
mod["main"] = to_cps(mod["main"], mod=mod)
mod = relay.transform.InferType()(mod)
mod["main"] = un_cps(mod["main"])
i_nd = rand(dtype, *shape)
forward = create_executor(mod=mod).evaluate()(i_nd)
tvm.testing.assert_allclose(forward.numpy(), 8 * i_nd.numpy())
def test_cps_pe():
def destroy_ref(x):
x = run_infer_type(x)
x = to_cps(x)
x = run_infer_type(x)
y = un_cps(x)
y = run_infer_type(y)
x = run_opt_pass(
x,
tvm.transform.Sequential(
[
transform.PartialEvaluate(),
transform.InferType(),
transform.DeadCodeElimination(inline_once=True, ignore_impurity=True),
]
),
)
assert Feature.fRefCreate not in detect_feature(x)
unit = relay.Function([], relay.const(0.0, dtype="float32"))
f_ref = relay.Var("f_ref")
one = relay.const(1.0, dtype="float32")
two = relay.const(2.0, dtype="float32")
cond = relay.var(shape=(), dtype="uint1", name_hint="cond")
true_branch = relay.RefWrite(f_ref, relay.Function([], one))
false_branch = relay.RefWrite(f_ref, relay.Function([], two))
if_expr = relay.If(cond, true_branch, false_branch)
stmt = relay.Let(
f_ref,
relay.RefCreate(unit),
relay.Let(relay.Var("x"), if_expr, relay.Call(relay.RefRead(f_ref), [])),
)
F = relay.Function([cond], stmt)
destroy_ref(F)
G = relay.Function([cond], relay.If(cond, one, two))
G = run_infer_type(G)
G = relay.transform.gradient(G)
destroy_ref(G)
x = relay.var("x", shape=(1, 16))
y = relay.var("y", shape=(1, 16))
z = relay.var("z", shape=(1, 16))
cond = relay.var("cond", shape=(), dtype="uint1")
H = relay.If(cond, x, y)
H = relay.add(H, z)
H = relay.Function([cond, x, y, z], H)
H = run_infer_type(H)
H = relay.transform.gradient(H)
destroy_ref(H)
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import tvm
from tvm import relay
from tvm.relay import op, create_executor, transform
from tvm.relay.analysis import Feature
from tvm.relay.analysis import detect_feature
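# ToGraphNormalForm is the inverse of ToANormalForm: let bindings are removed
# and sharing becomes implicit in the dataflow graph, so Feature.fLet must no
# longer be detected after conversion.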
def run_opt_pass(expr, opt_pass):
mod = tvm.IRModule.from_expr(expr)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
if mod is None:
mod = tvm.IRModule()
dev = tvm.device("llvm", 0)
result = create_executor(mod=mod, device=dev, target="llvm").evaluate(expr)(*args)
np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
def test_implicit_share():
x = relay.Var("x")
y = relay.Var("y")
z = relay.Var("z")
body = relay.Let(z, op.add(y, y), op.add(z, z))
body = relay.Let(y, op.add(x, x), body)
f = relay.Function([], relay.Let(x, relay.const(1), body))
g = run_opt_pass(f, transform.ToGraphNormalForm())
assert Feature.fLet in detect_feature(f)
assert not Feature.fLet in detect_feature(g)
check_eval(f, [], 8.0)
check_eval(g, [], 8.0)
def test_round_trip():
x = relay.Var("x")
y = relay.Var("y")
z = relay.Var("z")
body = relay.Let(z, op.add(y, y), op.add(z, z))
body = relay.Let(y, op.add(x, x), body)
f = relay.Function([], relay.Let(x, relay.const(1), body))
g = run_opt_pass(f, transform.ToGraphNormalForm())
h = run_opt_pass(g, transform.ToANormalForm())
assert Feature.fLet in detect_feature(f)
assert not Feature.fLet in detect_feature(g)
check_eval(f, [], 8.0)
check_eval(g, [], 8.0)
check_eval(h, [], 8.0)
if __name__ == "__main__":
test_implicit_share()
test_round_trip()
import tvm
from tvm import te
from tvm import relay
from tvm.relay.prelude import Prelude
from tvm.relay.analysis import unmatched_cases
import pytest
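# unmatched_cases performs exhaustiveness checking on a relay.Match: it
# returns example patterns that no clause covers (an empty list means the
# match is exhaustive).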
def test_empty_match_block():
v = relay.Var("v")
match = relay.Match(v, [])
unmatched = unmatched_cases(match)
assert len(unmatched) == 1
assert isinstance(unmatched[0], relay.PatternWildcard)
def test_trivial_matches():
v = relay.Var("v")
match = relay.Match(v, [relay.Clause(relay.PatternWildcard(), v)])
assert len(unmatched_cases(match)) == 0
w = relay.Var("w")
match = relay.Match(v, [relay.Clause(relay.PatternVar(w), w)])
assert len(unmatched_cases(match)) == 0
def test_single_constructor_adt():
mod = tvm.IRModule()
box = relay.GlobalTypeVar("box")
a = relay.TypeVar("a")
box_ctor = relay.Constructor("box", [a], box)
box_data = relay.TypeData(box, [a], [box_ctor])
mod[box] = box_data
v = relay.Var("v")
match = relay.Match(
v, [relay.Clause(relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]), v)]
)
assert len(unmatched_cases(match, mod)) == 0
nested_pattern = relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(
box_ctor,
[
relay.PatternConstructor(
box_ctor,
[relay.PatternConstructor(box_ctor, [relay.PatternWildcard()])],
)
],
),
v,
)
],
)
assert len(unmatched_cases(nested_pattern, mod)) == 0
def test_too_specific_match():
mod = tvm.IRModule()
p = Prelude(mod)
_, cons, nil = mod.get_type("List")
v = relay.Var("v")
match = relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternWildcard(),
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternWildcard()]
),
],
),
v,
)
],
)
unmatched = unmatched_cases(match, mod)
nil_found = False
single_length_found = False
assert len(unmatched) == 2
for case in unmatched:
assert isinstance(case, relay.PatternConstructor)
if case.constructor == nil:
nil_found = True
if case.constructor == cons:
assert isinstance(case.patterns[1], relay.PatternConstructor)
assert case.patterns[1].constructor == nil
single_length_found = True
assert nil_found and single_length_found
new_match = relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternWildcard(),
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternWildcard()]
),
],
),
v,
),
relay.Clause(relay.PatternWildcard(), v),
],
)
assert len(unmatched_cases(new_match, mod)) == 0
def test_multiple_constructor_clauses():
mod = tvm.IRModule()
p = Prelude(mod)
_, cons, nil = mod.get_type("List")
v = relay.Var("v")
match = relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])]
),
v,
),
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternWildcard(),
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])]
),
],
),
v,
),
relay.Clause(relay.PatternConstructor(nil, []), v),
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternWildcard(),
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternWildcard()]
),
],
),
v,
),
],
)
assert len(unmatched_cases(match, mod)) == 0
def test_missing_in_the_middle():
mod = tvm.IRModule()
p = Prelude(mod)
_, cons, nil = mod.get_type("List")
v = relay.Var("v")
match = relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])]
),
v,
),
relay.Clause(relay.PatternConstructor(nil, []), v),
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternWildcard(),
relay.PatternConstructor(
cons,
[
relay.PatternWildcard(),
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternWildcard()]
),
],
),
],
),
v,
),
],
)
unmatched = unmatched_cases(match, mod)
assert len(unmatched) == 1
assert isinstance(unmatched[0], relay.PatternConstructor)
assert unmatched[0].constructor == cons
assert isinstance(unmatched[0].patterns[1], relay.PatternConstructor)
assert unmatched[0].patterns[1].constructor == cons
assert isinstance(unmatched[0].patterns[1].patterns[1], relay.PatternConstructor)
assert unmatched[0].patterns[1].patterns[1].constructor == nil
def test_mixed_adt_constructors():
mod = tvm.IRModule()
box = relay.GlobalTypeVar("box")
a = relay.TypeVar("a")
box_ctor = relay.Constructor("box", [a], box)
box_data = relay.TypeData(box, [a], [box_ctor])
mod[box] = box_data
p = Prelude(mod)
_, cons, nil = p.mod.get_type("List")
v = relay.Var("v")
box_of_lists_inc = relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(
box_ctor,
[
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternWildcard()]
)
],
),
v,
)
],
)
unmatched = unmatched_cases(box_of_lists_inc, mod)
assert len(unmatched) == 1
assert isinstance(unmatched[0], relay.PatternConstructor)
assert unmatched[0].constructor == box_ctor
assert len(unmatched[0].patterns) == 1 and unmatched[0].patterns[0].constructor == nil
box_of_lists_comp = relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(box_ctor, [relay.PatternConstructor(nil, [])]), v
),
relay.Clause(
relay.PatternConstructor(
box_ctor,
[
relay.PatternConstructor(
cons, [relay.PatternWildcard(), relay.PatternWildcard()]
)
],
),
v,
),
],
)
assert len(unmatched_cases(box_of_lists_comp, mod)) == 0
list_of_boxes_inc = relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
relay.PatternWildcard(),
],
),
v,
)
],
)
unmatched = unmatched_cases(list_of_boxes_inc, mod)
assert len(unmatched) == 1
assert isinstance(unmatched[0], relay.PatternConstructor)
assert unmatched[0].constructor == nil
list_of_boxes_comp = relay.Match(
v,
[
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
relay.PatternConstructor(nil, []),
],
),
v,
),
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
relay.PatternConstructor(
cons,
[
relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
relay.PatternConstructor(nil, []),
],
),
],
),
v,
),
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
relay.PatternConstructor(
cons,
[
relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
relay.PatternConstructor(
cons,
[
relay.PatternConstructor(
box_ctor, [relay.PatternWildcard()]
),
relay.PatternConstructor(nil, []),
],
),
],
),
],
),
v,
),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternWildcard()]),
v,
),
relay.Clause(relay.PatternConstructor(nil, []), v),
],
)
assert len(unmatched_cases(list_of_boxes_comp, mod)) == 0
def test_tuple_match():
a = relay.Var("a")
b = relay.Var("b")
clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
x = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
assert len(unmatched_cases(x)) == 0
def test_inf_loop_case():
code = """
type Arith[A] {
Zero,
Const(A),
Plus(Arith[A], Arith[A])
}
def @shallow_opt[A](%a: Arith[A]) -> Arith[A] {
match (%a) {
Plus(Zero, %r) => %r,
Plus(%l, Zero) => %l,
_ => %a
}
}
"""
tvm.parser.fromtext(code)
def test_expanding_ctor_with_no_args():
code = """
type List[A] {
Cons(A, List[A]),
Nil,
}
def @expand_on_nil_match(%a: List[(List[()],)]) -> int {
match (%a) {
Cons((Nil), Nil) => 1,
_ => 2,
}
}
"""
tvm.parser.fromtext(code)
def test_expanding_empty_tuple():
code = """
type List[A] {
Cons(A, List[A]),
Nil,
}
def @expand_on_empty_tuple_match(%a: (List[()], ())) -> int {
match (%a) {
(Cons((), Nil), ()) => 1,
_ => 2,
}
}
"""
tvm.parser.fromtext(code)
if __name__ == "__main__":
pytest.main([__file__])
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import (
free_vars,
free_type_vars,
bound_vars,
bound_type_vars,
all_vars,
all_type_vars,
)
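# These analysis helpers collect variables from an expression: free_vars /
# free_type_vars return unbound (type) variables, bound_vars /
# bound_type_vars return those introduced by binders (function params, lets,
# patterns), and all_vars / all_type_vars return both.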
def assert_vars_match(actual, expected):
assert len(actual) == len(expected)
for i in range(len(actual)):
assert actual[i] == expected[i]
def test_free_vars():
ty = relay.TensorType([], "int32")
x = relay.Var("x", ty)
fvx = free_vars(x)
assert len(fvx) == 1
assert fvx[0] == x
v = relay.Constant(tvm.nd.array(10))
let = relay.Let(x, v, x)
fvx = free_vars(let)
assert len(free_vars(let)) == 0
f = relay.Function([x], x, ty)
assert len(free_vars(f)) == 0
def test_free_vars_tuple():
t = relay.Var("t")
fv = free_vars(relay.Tuple([t, t]))
assert len(fv) == 1
assert fv[0] == t
fv = free_vars(relay.TupleGetItem(t, 123))
assert len(fv) == 1
assert fv[0] == t
def test_free_type_vars():
tp = relay.TypeVar("")
ty = relay.TupleType([tp, relay.TensorType([], "int32")])
x = relay.Var("x", ty)
y = relay.Var("y")
let = relay.Let(x, y, x)
fvl = free_vars(let)
assert len(fvl) == 1
assert fvl[0] == y
ftvl = free_type_vars(let)
assert len(ftvl) == 1
assert ftvl[0] == tp
def test_bound_vars():
x = relay.Var("x")
y = relay.Var("y")
z = relay.Var("z")
a = relay.Var("a")
f1 = relay.Function([x, y, z], relay.Let(a, x, relay.Tuple([])))
assert_vars_match(bound_vars(f1), [x, y, z, a])
tup = relay.Tuple([x, y, z, a])
assert len(bound_vars(tup)) == 0
f2 = relay.Function([x, y], relay.Tuple([x, y, z, a]))
assert_vars_match(bound_vars(f2), [x, y])
def test_match_vars():
mod = tvm.IRModule()
p = relay.prelude.Prelude(mod)
rlist, cons, nil = p.mod.get_type("List")
x = relay.Var("x")
y = relay.Var("y")
z = relay.Var("z")
match1 = relay.Match(
nil(),
[
relay.Clause(relay.PatternConstructor(nil), z),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(x), relay.PatternVar(y)]),
cons(x, y),
),
],
)
match2 = relay.Match(
nil(),
[
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternVar(x)]), y
),
relay.Clause(relay.PatternWildcard(), z),
],
)
assert_vars_match(bound_vars(match1), [x, y])
assert_vars_match(free_vars(match1), [z])
assert_vars_match(all_vars(match1), [z, x, y])
assert_vars_match(bound_vars(match2), [x])
assert_vars_match(free_vars(match2), [y, z])
assert_vars_match(all_vars(match2), [x, y, z])
def test_bound_type_vars():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
ft1 = relay.FuncType([a], b, [a, b])
bound_ft1 = bound_type_vars(ft1)
assert_vars_match(bound_type_vars(ft1), [a, b])
ft2 = relay.FuncType([], c, [a])
assert_vars_match(bound_type_vars(ft2), [a])
tup_ty = relay.TupleType([a, b, c])
assert len(bound_type_vars(tup_ty)) == 0
f1 = relay.Function([], relay.Tuple([]), type_params=[a, b])
assert_vars_match(bound_type_vars(f1), [a, b])
f2 = relay.Function([], relay.Tuple([]), c)
assert len(bound_type_vars(f2)) == 0
x = relay.Var("x", a)
let1 = relay.Let(x, relay.Tuple([]), x)
assert len(bound_type_vars(let1)) == 0
let2 = relay.Let(x, relay.Function([], relay.Tuple([]), type_params=[b, c]), x)
assert_vars_match(bound_type_vars(let2), [b, c])
def test_all_vars():
x = relay.Var("x")
y = relay.Var("y")
z = relay.Var("z")
f1 = relay.Function([x, y], z)
assert_vars_match(all_vars(f1), [x, y, z])
f2 = relay.Function([x], relay.Let(y, relay.Tuple([]), z))
assert_vars_match(all_vars(f2), [x, y, z])
f3 = relay.Function([x], relay.Tuple([y, z]))
assert_vars_match(all_vars(f3), [x, y, z])
tup = relay.Tuple([x, y, z])
assert_vars_match(all_vars(tup), [x, y, z])
def test_all_type_vars():
a = relay.TypeVar("a")
b = relay.TypeVar("b")