import numpy as np
import pytest
from unittest.mock import patch

import tvm
import tvm.testing
from tvm import IRModule, relay, rpc, runtime
from tvm.contrib import utils
from tvm.relay import testing
from tvm.relay.backend import vm
from tvm.relay.backend.vm import VMCompiler
from tvm.relay.dataflow_pattern import wildcard, is_op
from tvm.relay.loops import while_loop
from tvm.relay.prelude import Prelude
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay.testing import mlp
from tvm.relay.transform import InferType


def check_result(target, dev, args, expected_result, mod):
    """
    Check that evaluating `expr` applied to the arguments produces
    `result` on Relay VM.

    Parameters
    ----------
    args: list of Expr
        The arguments to supply the expr.

    expected_result:
        The expected result of running the expression.
    """
    rts_result = relay.create_executor("vm", device=dev, target=target, mod=mod).evaluate()(*args)
    tvm.testing.assert_allclose(expected_result, rts_result.numpy())


def veval(f, *args, device=tvm.cpu(), target="llvm"):
    """Compile `f` (a Relay expression or IRModule) for the VM and invoke its "main" with `args`."""
    if isinstance(f, relay.Expr):
        mod = tvm.IRModule()
        mod["main"] = f
    else:
        assert isinstance(f, tvm.IRModule), "expected expression or module"
        mod = f
    exe = relay.vm.compile(mod, target)
    vm = runtime.vm.VirtualMachine(exe, device)
    return vm.invoke("main", *args)


def vmobj_to_list(o):
    """Flatten a VM result (NDArray or nested ADT) into a plain Python list."""
    if isinstance(o, tvm.nd.NDArray):
        return [o.numpy().tolist()]
    elif isinstance(o, tvm.runtime.container.ADT):
        result = []
        for f in o:
            result.extend(vmobj_to_list(f))
        return result
    else:
        raise RuntimeError("Unknown object type: %s" % type(o))


def test_split(target, dev):
    x = relay.var("x", shape=(12,))
    y = relay.split(x, 3, axis=0).astuple()
    f = relay.Function([x], y)
    x_data = np.random.rand(
        12,
    ).astype("float32")
    ref_res = np.split(x_data, 3, axis=0)
    res = veval(f, x_data, device=dev, target=target)
    for i in range(3):
        tvm.testing.assert_allclose(res[i].numpy(), ref_res[i])


def test_split_no_fuse(target, dev):
    x = relay.var("x", shape=(12,))
    y = relay.split(x, 3, axis=0).astuple()
    z = relay.concatenate([relay.TupleGetItem(y, 0)], axis=0)
    z = relay.annotation.stop_fusion(z)
    f = relay.Function([x], z)
    x_data = np.random.rand(
        12,
    ).astype("float32")
    res = veval(f, x_data, device=dev, target=target)
    tvm.testing.assert_allclose(res.numpy(), np.split(x_da
ta, 3, axis=0)[0]) def test_id(target, dev): x = relay.var("x", shape=(10, 10), dtype="float64") f = relay.Function([x], x) x_data = np.random.rand(10, 10).astype("float64") mod = tvm.IRModule() mod["main"] = f check_result(target, dev, [x_data], x_data, mod) def test_op(target, dev): x = relay.var("x", shape=(10, 10)) f = relay.Function([x], x + x) x_data = np.random.rand(10, 10).astype("float32") mod = tvm.IRModule() mod["main"] = f check_result(target, dev, [x_data], 2 * x_data, mod) def any(x): x = relay.op.nn.batch_flatten(x) return relay.op.min(x, axis=[0, 1]) @tvm.testing.known_failing_targets("vulkan") def test_cond(target, dev): x = relay.var("x", shape=(10, 10)) y = relay.var("y", shape=(10, 10)) f = relay.Function([x, y], any(relay.op.equal(x, y))) x_data = np.random.rand(10, 10).astype("float32") y_data = np.random.rand(10, 10).astype("float32") mod = tvm.IRModule() mod["main"] = f check_result(target, dev, [x_data, x_data], True, mod) check_result(target, dev, [x_data, y_data], False, mod) @tvm.testing.known_failing_targets("vulkan") def test_simple_if(target, dev): x = relay.var("x", shape=(10, 10)) y = relay.var("y", shape=(10, 10)) f = relay.Function([x, y], relay.If(any(relay.op.equal(x, y)), x, y)) x_data = np.random.rand(10, 10).astype("float32") y_data = np.random.rand(10, 10).astype("float32") mod = tvm.IRModule() mod["main"] = f check_result(target, dev, [x_data, x_data], x_data, mod) check_result(target, dev, [x_data, y_data], y_data, mod) @tvm.testing.parametrize_targets("llvm") def test_multiple_ifs(target, dev): mod = tvm.IRModule({}) b = relay.var("b") v0 = relay.var("v0") v1 = relay.var("v1") v2 = relay.var("v2") v3 = relay.var("v3") out = relay.Tuple([v2, v3]) out = relay.Let(v3, relay.If(b, v1, v0), out) out = relay.Let(v2, relay.If(b, v0, v1), out) out = relay.Let(v1, relay.Tup
le([relay.const(1)]), out) out = relay.Let(v0, relay.Tuple([relay.const(0)]), out) fn = relay.Function([b], out) mod["main"] = fn func = relay.create_executor(device=dev, mod=mod, kind="vm").evaluate() res = vmobj_to_list(func(False)) assert res == [1, 0] def test_unused_function(target, dev): cond = relay.const(True) mod = tvm.IRModule() then_name = relay.GlobalVar("times_2") else_name = relay.GlobalVar("times_3") t1 = relay.TensorType((2, 2), dtype="float32") x1 = relay.var("x1", t1, dtype="float32") x2 = relay.var("x2", t1, dtype="float32") f2 = relay.multiply(x1, relay.const(2.0)) f3 = relay.multiply(x2, relay.const(3.0)) mod[then_name] = relay.Function([x1], f2) mod[else_name] = relay.Function([x2], f3) mod = InferType()(mod) x3 = relay.var("x3", t1, dtype="float32") f = relay.If(cond, then_name(x3), else_name(x3)) mod["main"] = relay.Function([x3], f) x_data = np.random.rand(2, 2).astype("float32") y_data = x_data * 2 check_result(target, dev, [x_data], y_data, mod) def test_simple_call(target, dev): mod = tvm.IRModule({}) sum_up = relay.GlobalVar("sum_up") i = relay.var("i", shape=[], dtype="int32") sb = ScopeBuilder() sb.ret(i) func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32")) mod[sum_up] = func i_data = np.array(0, dtype="int32") iarg = relay.var("iarg", shape=[], dtype="int32") mod["main"] = relay.Function([iarg], sum_up(iarg)) check_result(target, dev, [i_data], i_data, mod) def test_count_loop(target, dev): mod = tvm.IRModule({}) sum_up = relay.GlobalVar("sum_up") i = relay.var("i", shape=[], dtype="int32") sb = ScopeBuilder() with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))): sb.ret(i) with sb.else_scope(): one_less = relay.subtract(i, relay.const(1, dtype="int32")) rec_call = relay.Call(sum_up, [one_less]) sb.ret(relay.add(rec_call, i)) func = re
lay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32")) mod[sum_up] = func i_data = np.array(0, dtype="int32") iarg = relay.var("i", shape=[], dtype="int32") mod["main"] = relay.Function([iarg], sum_up(iarg)) result = veval(mod, i_data, device=dev, target=target) tvm.testing.assert_allclose(result.numpy(), i_data) check_result(target, dev, [i_data], i_data, mod) def test_sum_loop(target, dev): mod = tvm.IRModule({}) sum_up = relay.GlobalVar("sum_up") i = relay.var("i", shape=[], dtype="int32") accum = relay.var("accum", shape=[], dtype="int32") sb = ScopeBuilder() with sb.if_scope(relay.equal(i, relay.const(0, "int32"))): sb.ret(accum) with sb.else_scope(): one_less = relay.subtract(i, relay.const(1, "int32")) new_accum = relay.add(accum, i) sb.ret(relay.Call(sum_up, [one_less, new_accum])) func = relay.Function([i, accum], sb.get()) mod[sum_up] = func mod = relay.transform.InferType()(mod) loop_bound = 0 i_data = np.array(loop_bound, dtype="int32") accum_data = np.array(0, dtype="int32") iarg = relay.var("i", shape=[], dtype="int32") aarg = relay.var("accum", shape=[], dtype="int32") mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg)) check_result(target, dev, [i_data, accum_data], sum(range(1, loop_bound + 1)), mod) def test_tuple_fst(target, dev): ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))]) tup = relay.var("tup", type_annotation=ttype) f = relay.Function([tup], relay.TupleGetItem(tup, 0)) i_data = np.random.rand(41).astype("float32") j_data = np.random.rand(10).astype("float32") mod = tvm.IRModule() mod["main"] = f check_result(target, dev, [(i_data, j_data)], i_data, mod) def test_tuple_second(target, dev): ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))]) tup = relay.var("tup", type_annotation=ttype) f = relay.Function([tup], relay.TupleGetItem(tup, 1))
i_data = np.random.rand(41).astype("float32") j_data = np.random.rand(10).astype("float32") mod = tvm.IRModule() mod["main"] = f check_result(target, dev, [(i_data, j_data)], j_data, mod) def test_list_constructor(target, dev): mod = tvm.IRModule() p = Prelude(mod) l, cons, nil = mod.get_type("List") one2 = cons(relay.const(1), nil()) one3 = cons(relay.const(2), one2) one4 = cons(relay.const(3), one3) f = relay.Function([], one4) mod["main"] = f result = veval(mod, device=dev, target=target) assert len(result) == 2 assert len(result[1]) == 2 obj = vmobj_to_list(result) tvm.testing.assert_allclose(obj, np.array([3, 2, 1])) def test_let_tensor(target, dev): sb = relay.ScopeBuilder() shape = (1,) x = relay.var("x", shape=shape, dtype="float32") x1 = relay.var("x1", shape=shape, dtype="float32") x1 = sb.let(x1, x) xplusone = x1 + relay.const(42.0, "float32") sb.ret(xplusone) body = sb.get() f = relay.Function([x], body) x_data = np.random.rand(*shape).astype("float32") mod = tvm.IRModule() mod["main"] = f check_result(target, dev, [x_data], x_data + 42.0, mod) def test_let_scalar(target, dev): sb = relay.ScopeBuilder() x = relay.var("x", "float32") x1 = sb.let("x1", x) xplusone = x1 + relay.const(42.0, "float32") sb.ret(xplusone) body = sb.get() f = relay.Function([x], body) x_data = np.array(np.random.rand()).astype("float32") mod = tvm.IRModule() mod["main"] = f check_result(target, dev, [x_data], x_data + 42.0, mod) def test_compose(target, dev): mod = tvm.IRModule() p = Prelude(mod) compose = p.compose sb = relay.ScopeBuilder() x = relay.var("x", "float32") x1 = sb.let("x1", x) xplusone = x1 + relay.const(1.0, "float32") sb.ret(xplusone) body = sb.get() add_one = relay.GlobalVar("add_one") add_one_func = relay.Function([x], body) sb = relay.ScopeBuilder() y = relay.va
r("y", "float32") add_two_func = sb.let("add_two", compose(add_one_func, add_one_func)) add_two_res = add_two_func(y) sb.ret(add_two_res) add_two_body = sb.get() mod[add_one] = add_one_func f = relay.Function([y], add_two_body) mod["main"] = f x_data = np.array(np.random.rand()).astype("float32") result = veval(mod, [x_data], device=dev, target=target) tvm.testing.assert_allclose(result.numpy(), x_data + 2.0) def test_list_hd(target, dev): mod = tvm.IRModule() p = Prelude(mod) l, cons, nil = mod.get_type("List") hd = mod.get_global_var("hd") one2 = cons(relay.const(1), nil()) one3 = cons(relay.const(2), one2) one4 = cons(relay.const(3), one3) three = hd(one4) f = relay.Function([], three) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(result.numpy(), 3) def test_list_tl_empty_list(target, dev): mod = tvm.IRModule() p = Prelude(mod) l, cons, nil = mod.get_type("List") tl = mod.get_global_var("tl") f = relay.Function([], tl(nil())) mod["main"] = f with pytest.raises(tvm.error.TVMError): result = veval(mod, device=dev, target=target) def test_list_tl(target, dev): mod = tvm.IRModule() p = Prelude(mod) l, cons, nil = mod.get_type("List") tl = mod.get_global_var("tl") one2 = cons(relay.const(1), nil()) one3 = cons(relay.const(2), one2) one4 = cons(relay.const(3), one3) f = relay.Function([], tl(one4)) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(vmobj_to_list(result), np.array([2, 1])) def test_list_nth(target, dev): expected = list(range(10)) for i in range(len(expected)): mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") nth = mod.get_global_var("nth") l = nil() for i in reversed(expected): l = cons(relay.const(i), l) f = relay.Function(
[], nth(l, relay.const(i))) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(result.numpy(), expected[i]) def test_list_update(target, dev): expected = list(range(10)) mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") update = mod.get_global_var("update") l = nil() for i in range(len(expected)): l = cons(relay.const(0), l) for i, v in enumerate(expected): l = update(l, relay.const(i), relay.const(v)) f = relay.Function([], l) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected)) def test_list_length(target, dev): expected = list(range(10)) mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") length = mod.get_global_var("length") l = nil() for _ in range(len(expected)): l = cons(relay.const(0), l) l = length(l) f = relay.Function([], l) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(result.numpy(), 10) def test_list_map(target, dev): mod = tvm.IRModule() p = Prelude(mod) x = relay.var("x", "int32") add_one_func = relay.Function([x], relay.const(1) + x) _, cons, nil = mod.get_type("List") map = mod.get_global_var("map") l = cons(relay.const(2), cons(relay.const(1), nil())) f = relay.Function([], map(add_one_func, l)) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2])) def test_list_foldl(target, dev): mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") foldl = mod.get_global_var("foldl") x = relay.var("x") y = relay.var("y") rev_dup_func = relay.Function([y, x], cons(x, cons(x, y))) l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil(
)))) f = relay.Function([], foldl(rev_dup_func, nil(), l)) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 3, 2, 2, 1, 1])) def test_list_foldr(target, dev): mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") foldr = mod.get_global_var("foldr") x = relay.var("x") y = relay.var("y") identity_func = relay.Function([x, y], cons(x, y)) l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil()))) f = relay.Function([], foldr(identity_func, nil(), l)) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(vmobj_to_list(result), np.array([1, 2, 3])) def test_list_sum(target, dev): mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") sum = mod.get_global_var("sum") l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil()))) f = relay.Function([], sum(l)) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(result.numpy(), 6) def test_list_filter(target, dev): mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") filter = mod.get_global_var("filter") x = relay.var("x", "int32") greater_than_one = relay.Function([x], x > relay.const(1)) l = cons( relay.const(1), cons( relay.const(3), cons(relay.const(1), cons(relay.const(5), cons(relay.const(1), nil()))) ), ) f = relay.Function([], filter(greater_than_one, l)) mod["main"] = f result = veval(mod, device=dev, target=target) tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 5])) def test_closure(target, dev): x = relay.var("x", shape=()) y = relay.var("y", shape=()) f = relay.Function([x], x + y) ff = relay.Function([y], f) clo = ff(relay.const(1.0)) main = clo(relay.const(2.0)) res = veval(main,
device=dev, target=target) tvm.testing.assert_allclose(res.numpy(), 3.0) def test_add_op_scalar(target, dev): """ test_add_op_scalar: fn (x, y) { return x + y; } """ mod = tvm.IRModule() x = relay.var("x", shape=()) y = relay.var("y", shape=()) func = relay.Function([x, y], relay.op.add(x, y)) x_y_data = [ (np.array(10.0, dtype="float32"), np.array(1.0, dtype="float32")), (np.float32(10.0), np.float32(1.0)), (10.0, 1.0), ] for (x_data, y_data) in x_y_data: mod["main"] = func check_result(target, dev, [x_data, y_data], x_data + y_data, mod) def test_add_op_scalar_float16(target, dev): """ test_add_op_scalar_float16: fn (x, y) { return x + y; } """ mod = tvm.IRModule() x = relay.var("x", shape=(), dtype="float16") y = relay.var("y", shape=(), dtype="float16") func = relay.Function([x, y], relay.op.add(x, y)) x_y_data = [ (np.array(10.0, dtype="float16"), np.array(1.0, dtype="float16")), (np.float16(10.0), np.float16(1.0)), ] for (x_data, y_data) in x_y_data: mod["main"] = func check_result(target, dev, [x_data, y_data], x_data + y_data, mod) def test_add_op_scalar_int(target, dev): """ test_add_op_scalar_int: fn (x, y) { return x + y; } """ mod = tvm.IRModule() x = relay.var("x", shape=(), dtype="int32") y = relay.var("y", shape=(), dtype="int32") func = relay.Function([x, y], relay.op.add(x, y)) x_y_data = [ (np.array(10.0, dtype="int32"), np.array(1.0, dtype="int32")), (np.int32(10), np.int32(1)), (10, 1), ] for (x_data, y_data) in x_y_data: mod["main"] = func check_result(target, dev, [x_data, y_data], x_data + y_data, mod) def test_add_op_tensor(target, dev): """ test_add_op_tensor: fn (x, y) { return x + y; } """ mod = tvm.IRModule()
x = relay.var("x", shape=(10, 5)) y = relay.var("y", shape=(10, 5)) func = relay.Function([x, y], relay.op.add(x, y)) x_data = np.random.rand(10, 5).astype("float32") y_data = np.random.rand(10, 5).astype("float32") mod["main"] = func check_result(target, dev, [x_data, y_data], x_data + y_data, mod) def test_add_op_broadcast(target, dev): """ test_add_op_broadcast: fn (x, y) { return x + y; } """ mod = tvm.IRModule() x = relay.var("x", shape=(10, 5)) y = relay.var("y", shape=(1, 5)) func = relay.Function([x, y], relay.op.add(x, y)) x_data = np.random.rand(10, 5).astype("float32") y_data = np.random.rand(1, 5).astype("float32") mod["main"] = func check_result(target, dev, [x_data, y_data], x_data + y_data, mod) def test_vm_optimize_dynamic(): dtype = "float32" x = relay.var("x", shape=(relay.Any(), relay.Any()), dtype=dtype) y = relay.var("y", shape=(relay.Any(), relay.Any()), dtype=dtype) mod = tvm.IRModule() mod["main"] = relay.Function([x, y], relay.add(x, y)) comp = relay.vm.VMCompiler() opt_mod, _ = comp.optimize(mod, target="llvm") assert "shape_func" in opt_mod.astext(False) def test_vm_optimize(): mod, params = testing.synthetic.get_workload() comp = relay.vm.VMCompiler() opt_mod, _ = comp.optimize(mod, target="llvm", params=params) free_vars = relay.analysis.free_vars(opt_mod["main"].body) assert len(free_vars) == 1 def test_loop_free_var(target, dev): x = relay.var("x", shape=(), dtype="int32") i = relay.var("i", shape=(), dtype="int32") s = relay.var("s", shape=(), dtype="int32") def cond(i, _): return i < relay.const(10, dtype="int32") def body_no_free_var(i, acc): incr = relay.const(1, "int32") return i + incr, acc + i def body_with_free_var(i, acc): incr = relay.const(1, "int32") return i + incr, acc + x for args, body, expected in zip([[], [1]], [body_no_free_var, bod
y_with_free_var], [45, 10]): loop = while_loop(cond, [i, s], body) tup = loop(relay.const(0, dtype="int32"), relay.zeros(shape=(), dtype="int32")) ret = relay.TupleGetItem(tup, 1) mod = tvm.IRModule() mod["main"] = relay.Function(relay.analysis.free_vars(ret), ret) check_result(target, dev, args, expected, mod) def test_vm_reshape_tensor(target, dev): x_np = np.random.uniform(size=(8, 16)).astype("float32") x = relay.var("x", shape=(8, 16), dtype="float32") y = relay.reshape(x, [-1, 4, 8]) mod = tvm.IRModule() mod["main"] = relay.Function([x], y) with tvm.transform.PassContext(opt_level=3): exec = relay.vm.compile(mod, "llvm") assert "reshape_tensor" in exec.bytecode check_result(target, dev, [x_np], x_np.reshape([4, 4, 8]), mod) x = relay.var("x", shape=(8, 16), dtype="float32") y = relay.reshape(x, [16, -1]) y = relay.reverse_reshape(y, [-1, 4, 0]) mod = tvm.IRModule() mod["main"] = relay.Function([x], y) with tvm.transform.PassContext(opt_level=3): exec = relay.vm.compile(mod, "llvm") assert exec.bytecode.count("reshape_tensor") == 1 check_result(target, dev, [x_np], x_np.reshape([4, 4, 8]), mod) for n in [tvm.tir.Any(), tvm.te.size_var("n")]: x = relay.var("x", shape=(n, 16), dtype="float32") y = relay.reshape(x, [-1, 4]) y = relay.reshape(y, [0, 2, -1]) mod = tvm.IRModule() mod["main"] = relay.Function([x], y) with tvm.transform.PassContext(opt_level=3): exec = relay.vm.compile(mod, "llvm") assert exec.bytecode.count("reshape_tensor") == 1 check_result(target, dev, [x_np], x_np.reshape([32, 2, 2]), mod) x = relay.var("x", shape=(8, 16), dtype="float32") y = relay.var("y", shape=(3,), dtype="int32") z = relay.reshape(x, [-1, 4, 8]) z = relay.reshape(z, y) mod = tvm.IRModule() mod["main"] = relay.Function([x, y], z) with tvm.transform.PassContext(opt_level=3):
exec = relay.vm.compile(mod, "llvm") assert exec.bytecode.count("reshape_tensor") == 2 assert "reshape_tensor" in exec.bytecode y_np = np.array([8, 2, 8]).astype("int32") check_result(target, dev, [x_np, y_np], x_np.reshape([8, 2, 8]), mod) def test_vm_reshape_and_copy(target, dev): """Make sure the compiler notices the reshape result shape is a literal and can use the immediate-mode alloc_tensor instruction instead of alloc_tensor_reg.""" x_np = np.random.uniform(size=(1, 1)).astype("float32") x = relay.var("x", shape=(1, 1), dtype="float32") mod = tvm.IRModule.from_expr(relay.Function([x], relay.copy(relay.reshape(x, [0, 1])))) with tvm.transform.PassContext(opt_level=3): exec = relay.vm.compile(mod, "llvm") assert "alloc_tensor" in exec.bytecode assert not "alloc_tensor_reg" in exec.bytecode check_result(target, dev, [x_np], x_np.reshape([1, 1]), mod) def test_vm_reshape_tuple(target, dev, x_shape=(1, 4, 2), y_shape=(1, 2, 10)): tup = relay.var( "tup", type_annotation=relay.TupleType([relay.TensorType(x_shape), relay.TensorType(y_shape)]), ) out = relay.reshape(relay.TupleGetItem(tup, 0), (1, -1)) f = relay.Function([tup], out) x_data = np.random.uniform(size=x_shape).astype("float32") y_data = np.random.uniform(size=y_shape).astype("float32") res = veval(f, (x_data, y_data), device=dev, target=target) tvm.testing.assert_allclose(res.numpy(), np.reshape(x_data, (1, -1))) def test_constant_shape_with_external_codegen(): @tvm.register_func("relay.ext.test1") def relay_ext_test(func): return None mod = tvm.IRModule() shape = (relay.Any(), 25) dtype = "float32" x = relay.var("x", shape=shape, dtype=dtype) weight = relay.const(np.random.rand(5, 25).astype("float32"), dtype="float32") out = relay.nn.dense(x, weight) f1 = relay.Function([x], out) f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1)) f1 = f1.with_attr("Inline", tvm.tir.
IntImm("int32", 1)) f1 = f1.with_attr("Compiler", "test1") f1 = f1.with_attr("global_symbol", "f1") glb_f1 = relay.GlobalVar("f1") mod[glb_f1] = f1 mod = relay.transform.InferType()(mod) x = relay.var("x", shape=shape, dtype=dtype) mod["main"] = relay.Function([x], glb_f1(x)) comp = relay.vm.VMCompiler() opt_mod, _ = comp.optimize(mod, target="llvm") assert "shape_func" in opt_mod.astext(False) def prepare_vm_model(path, tensor_shape): """ Virtual Machine is compiled for simple topology and exported as library to given path """ target = tvm.target.Target("llvm --host=llvm") x = relay.var("x", shape=tensor_shape) f = relay.Function([x], x + x) mod = IRModule.from_expr(f) vm_exec = vm.compile(mod, target=target) vm_exec.mod.export_library(path) def test_vm_rpc(): """ This test checks to make sure you can export a VMExecutable, upload it to a remote machine using RPC and then execute it on the other machine. """ shape = (10, 1) temp = utils.tempdir() path = temp.relpath("vm_library.so") prepare_vm_model(path, shape) def check_remote(server): remote = rpc.connect(server.host, server.port, session_timeout=10) remote.upload(path) rexec = remote.load_module("vm_library.so") device = remote.cpu() vm_factory = runtime.vm.VirtualMachine(rexec, device) np_input = np.random.uniform(size=shape).astype("float32") input_tensor = tvm.nd.array(np_input, device) out = vm_factory.invoke("main", input_tensor) np.testing.assert_allclose(out.numpy(), np_input + np_input) check_remote(rpc.Server("127.0.0.1")) def test_vm_invoke_with_outputs_rpc(): """ This test checks to make sure you can export a VMExecutable, upload it to a remote machine using RPC and then execute it on the other machine with preallocated outputs. """
shape = (3, 2) temp = utils.tempdir() path = temp.relpath("vm_library.so") prepare_vm_model(path, shape) def check_remote_invoke_with_outputs(server): remote = rpc.connect(server.host, server.port, session_timeout=10) remote.upload(path) rexec = remote.load_module("vm_library.so") device = remote.cpu() vm_factory = runtime.vm.VirtualMachine(rexec, device) np_input = np.random.uniform(size=shape).astype("float32") input_tensor = tvm.nd.array(np_input, device) np_output = np.empty(shape, dtype="float32") output_tensor = tvm.nd.array(np_output, device) vm_factory.invoke_with_outputs( "main", input_args={"x": input_tensor}, output_args=[output_tensor] ) np.testing.assert_allclose(output_tensor.numpy(), np_input + np_input) check_remote_invoke_with_outputs(rpc.Server("127.0.0.1")) def test_vm_invoke_with_outputs(): target = tvm.target.Target("llvm") shape = (3, 2) x = relay.var("x", shape=shape) f = relay.Function([x], x + x) mod = IRModule.from_expr(f) vm_exec = vm.compile(mod, target=target) vm_factory = runtime.vm.VirtualMachine(vm_exec, tvm.cpu()) np_input = np.random.uniform(size=shape).astype("float32") input_tensor = tvm.nd.array(np_input) np_output = np.empty(shape, dtype="float32") output_tensor = tvm.nd.array(np_output) vm_factory.invoke_with_outputs( "main", input_args={"x": input_tensor}, output_args=[output_tensor] ) np.testing.assert_allclose(output_tensor.numpy(), np_input + np_input) def test_get_output_single(): target = tvm.target.Target("llvm") x = relay.var("x", shape=(10,)) f = relay.Function([x], x + x) mod = IRModule.from_expr(f) vm_exec = vm.compile(mod, target=target) vm_factory = runtime.vm.VirtualMachine(vm_exec, tvm.cpu()) inp = np.ones(10, dtype="float32")
vm_factory.invoke_stateful("main", inp) outputs = vm_factory.get_outputs() assert len(outputs) == 1 np.testing.assert_allclose(outputs[0].numpy(), inp + inp) @tvm.testing.parametrize_targets("llvm") def test_get_output_multiple(target, dev): x = relay.var("x", shape=(10,)) f = relay.Function([x], relay.Tuple([x + x, x])) mod = IRModule.from_expr(f) vm_exec = vm.compile(mod, target=target) vm_factory = runtime.vm.VirtualMachine(vm_exec, dev) inp = np.ones(10, dtype="float32") vm_factory.invoke_stateful("main", inp) outputs = vm_factory.get_outputs() assert len(outputs) == 2 np.testing.assert_allclose(outputs[0].numpy(), inp + inp) np.testing.assert_allclose(outputs[1].numpy(), inp) @tvm.testing.parametrize_targets("llvm") def test_get_input_index(target, dev): data_0, data_1 = ["d1", "d2"] x, y = [relay.var(c, shape=(10,)) for c in [data_0, data_1]] f = relay.Function([x, y], x + y) mod = IRModule.from_expr(f) vm_exec = vm.compile(mod, target=target) vm_factory = runtime.vm.VirtualMachine(vm_exec, dev) assert vm_factory.get_input_index(data_1) == 1 assert vm_factory.get_input_index(data_0) == 0 assert vm_factory.get_input_index("invalid") == -1 def get_one_input_relay_mod(tensor_type, shape, data_name): x = relay.var(data_name, shape=shape, dtype=tensor_type) y = relay.exp(x) f = relay.Function([x], y) return IRModule.from_expr(f) @tvm.testing.parametrize_targets("llvm") def test_one_set_input(target, dev): dtype = "float32" in_shape = [1, 2, 3, 3] in_data_name_0 = "d0" mod = get_one_input_relay_mod(dtype, in_shape, in_data_name_0) vm_exec = vm.compile(mod, target=target) exe = runtime.vm.VirtualMachine(vm_exec, dev) data0_core = np.random.uniform(size=in_shape).astype(dtype) data0 = tvm.nd.array(data0_core) ref_res_core = np.exp(data0_core) ref_res = tvm.nd.array(ref_res_core) exe.set_input("main", data0) output = exe.
invoke("main") assert output.dtype == ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) data_dict = {in_data_name_0: data0} exe.set_input("main", **data_dict) output = exe.invoke("main") assert output.dtype == ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) def get_multiple_input_relay_mod(tensor_type, shape, data_name0, data_name1): x, y = [relay.var(c, shape=shape, dtype=tensor_type) for c in [data_name0, data_name1]] f = relay.Function([x, y], x + y) return IRModule.from_expr(f) @tvm.testing.parametrize_targets("llvm") def test_multiple_set_input(target, dev): dtype = "float32" in_shape = [1, 2, 3, 3] in_data_name_0 = "d0" in_data_name_1 = "d1" mod = get_multiple_input_relay_mod(dtype, in_shape, in_data_name_0, in_data_name_1) vm_exec = vm.compile(mod, target=target) exe = runtime.vm.VirtualMachine(vm_exec, dev) data0_core = np.random.uniform(size=in_shape).astype(dtype) data0 = tvm.nd.array(data0_core) data1_core = np.random.uniform(size=in_shape).astype(dtype) data1 = tvm.nd.array(data1_core) ref_res_core = data0_core + data1_core ref_res = tvm.nd.array(ref_res_core) exe.set_input("main", data0, data1) output = exe.invoke("main") assert output.dtype == ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) data_dict = {in_data_name_1: data1, in_data_name_0: data0} exe.set_input("main", **data_dict) output = exe.invoke("main") assert output.dtype == ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) @tvm.testing.parametrize_targets("llvm") def test_one_set_one_input(target, dev): dtype = "float32" in_shape = [1, 2, 3, 3] in_data_name_0 = "d0" mod = get_one_input_relay_mod(dtype, in_shape, in_data_name_0) vm_exec = vm.compile(mod, target=target) exe = runtime.vm.VirtualMachine(vm_exec, dev) data0_core = np.random.uniform(size=in_shape).astype(dtype)
data0 = tvm.nd.array(data0_core) ref_res_core = np.exp(data0_core) ref_res = tvm.nd.array(ref_res_core) exe.set_one_input("main", 0, data0) output = exe.invoke("main") assert output.dtype == ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) exe.set_one_input("main", in_data_name_0, data0) output = exe.invoke("main") assert output.dtype == ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) data_dict = {in_data_name_0: data0} exe.set_one_input("main", **data_dict) output = exe.invoke("main") assert output.dtype == ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) @tvm.testing.parametrize_targets("llvm") def test_multiple_set_one_input(target, dev): dtype = "float32" in_shape = [1, 2, 3, 3] in_data_name_0 = "d0" in_data_name_1 = "d1" mod = get_multiple_input_relay_mod(dtype, in_shape, in_data_name_0, in_data_name_1) vm_exec = vm.compile(mod, target=target) exe = runtime.vm.VirtualMachine(vm_exec, dev) data0_core = np.random.uniform(size=in_shape).astype(dtype) data0 = tvm.nd.array(data0_core) data1_core = np.random.uniform(size=in_shape).astype(dtype) data1 = tvm.nd.array(data1_core) ref_res_core = data0_core + data1_core ref_res = tvm.nd.array(ref_res_core) exe.set_one_input("main", 1, data1) exe.set_one_input("main", 0, data0) output = exe.invoke("main") assert output.dtype == ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) exe.set_one_input("main", in_data_name_1, data1) exe.set_one_input("main", in_data_name_0, data0) output = exe.invoke("main") assert output.dtype == ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) data_dict = {in_data_name_1: data1} exe.set_one_input("main", **data_dict) data_dict = {in_data_name_0: data0} exe.set_one_input("main", **data_dict) output = exe.invoke("main") assert output.dtype
== ref_res.dtype tvm.testing.assert_allclose(ref_res_core, output.numpy()) @tvm.testing.parametrize_targets("llvm") def test_benchmark(target, dev): mod, params = mlp.get_workload(1) lib = vm.compile(mod, target=target, params=params) exe = runtime.vm.VirtualMachine(lib, tvm.cpu()) data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32")) result = exe.benchmark(tvm.cpu(), data, func_name="main", repeat=2, number=1) assert result.mean == result.median assert result.mean > 0 assert len(result.results) == 2 with patch.object( tvm.runtime.module.Module, "time_evaluator", return_value=lambda x: tvm.runtime.module.BenchmarkResult([1, 2, 2, 5]), ) as method: result = exe.benchmark(dev, data, func_name="main", repeat=2, number=1) assert result.mean == 2.5 assert result.median == 2.0 assert result.max == 5 assert result.min == 1 assert result.std == 1.5 def test_benchmark_end_to_end(target, dev): mod, params = mlp.get_workload(1) lib = vm.compile(mod, target=target, params=params) exe = runtime.vm.VirtualMachine(lib, dev) data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"), device=dev) result = exe.benchmark(dev, data, func_name="main", repeat=2, number=1, end_to_end=True) assert result.mean > 0 @tvm.testing.requires_cuda def test_benchmark_end_to_end_rpc(): server = rpc.Server("127.0.0.1") remote = rpc.connect(server.host, server.port) mod, params = mlp.get_workload(1) lib = vm.compile(mod, target="cuda", params=params) temp = utils.tempdir() path = temp.relpath("vm_library.so") lib.mod.export_library(path) remote.upload(path) rlib = remote.load_module("vm_library.so") exe = runtime.vm.VirtualMachine(rlib, remote.device("cuda")) data = tvm.nd.array( np.random.rand(1, 1, 28, 28).astype("float32"), device=remote.device("cuda") ) result = exe.benchmark( remote.device("cuda"), data=dat
a, func_name="main", repeat=2, number=1, end_to_end=True ) assert result.mean > 0 def test_shape_func_nested_function(): @tvm.register_func("relay.ext.test2") def relay_ext_test(func): return None data_shape = (relay.Any(), 16) weight_shape = (relay.Any(), 16) dense = relay.nn.dense( relay.var("data", shape=data_shape), relay.var("weight", shape=weight_shape) ) mod = tvm.IRModule.from_expr(dense) patterns = [("test.dense", is_op("nn.dense")(wildcard(), wildcard()))] passes = tvm.transform.Sequential( [ relay.transform.MergeComposite(patterns), relay.transform.AnnotateTarget(["test2"]), relay.transform.PartitionGraph(), ] ) mod = passes(mod) compiler = VMCompiler() compiler.lower(mod, "llvm") @tvm.testing.requires_cuda def test_storage_size_and_offset_on_cpu(): """Tests allocations place sizes and offsets on the CPU host even if the rest of the computation is on a different device type.""" def input(): return tvm.parser.fromtext( """ def @main(%a: Tensor[(5, 7), float32], param_device_types=[2], result_device_type=2) { add(%a, %a) } """ ) exe = relay.vm.compile( input(), tvm.target.Target("cuda"), ) assert "VirtualDevice[0]: device type 1" in exe.virtual_devices assert "VM Const[0]: NDArray[(),int64,(1,0)]=[140] on device index 0" in exe.constants assert "VM Const[1]: NDArray[(),int64,(1,0)]=[0] on device index 0" in exe.constants @tvm.testing.requires_cuda def test_reshape_shape_on_cpu(): """Tests the argument to a reshape places the shape on the CPU host even if the rest of the computation is on a different device type.""" def input(): return tvm.parser.fromtext( """ def @main(%x: Tensor[(2,
8), float32], param_device_types=[2], result_device_type=2) { reshape(%x, newshape=[2, 4, 2]) } """ ) exe = relay.vm.compile( input(), tvm.target.Target("cuda"), ) assert "VirtualDevice[0]: device type 1" in exe.virtual_devices assert "VM Const[0]: NDArray[(3),int64,(1,0)]=[2,4,2] on device index 0" in exe.constants @tvm.testing.requires_cuda def test_multi_targets(): n = 10 x = relay.var("x", shape=(n,)) y = relay.var("y", shape=(n,)) z = relay.var("z", shape=(n,)) f = relay.Function([x, y, z], x + relay.op.annotation.on_device(y + z, tvm.cpu())) mod = IRModule.from_expr(f) with tvm.transform.PassContext( opt_level=3, config={"relay.fallback_device_type": tvm.cuda().device_type} ): exe = relay.vm.compile( mod, target={"cpu": tvm.target.Target("llvm"), "cuda": tvm.target.Target("cuda")} ) vm = runtime.vm.VirtualMachine(exe, [tvm.cuda(), tvm.cpu()]) x_data = np.random.rand( n, ).astype("float32") y_data = np.random.rand( n, ).astype("float32") z_data = np.random.rand( n, ).astype("float32") actual_result = vm.invoke("main", x_data, y_data, z_data) expected_result = x_data + y_data + z_data tvm.testing.assert_allclose(actual_result.numpy(), expected_result) def test_let_bound_constants(): """This tests for an ICHECK failure for ill-formed IR with let-bound constants""" x = relay.var("x", shape=(3,), dtype="int32") y = relay.take(x, relay.const(0)) z = relay.const(1) f = relay.Function([x], relay.stack((z, y), axis=0)) mod = IRModule.from_expr(f) compiler = VMCompiler() compiler.optimize(mod, target="llvm") def test_large_constants(): """Large constants can be serialized outside of executable""" target = tvm.target.Target("llvm") dev = tvm.cpu() x = relay.var("x", shape=(1000, 1000)) const_data =
np.random.rand(1000, 1000).astype("float32") const = relay.const(const_data, dtype="float32") func = relay.Function([x], relay.op.add(x, const)) mod = tvm.IRModule.from_expr(func) vm_exec = vm.compile(mod, target=target) temp = utils.tempdir() path_consts = temp.relpath("consts") vm_exec.move_late_bound_consts(path_consts, byte_limit=256) path_dso = temp.relpath("lib.so") vm_exec.mod.export_library(path_dso) mod = runtime.load_module(path_dso) mod["load_late_bound_consts"](path_consts) x_data = np.random.rand(1000, 1000).astype("float32") the_vm = runtime.vm.VirtualMachine(mod, dev) actual = the_vm.invoke("main", x_data) expected = x_data + const_data tvm.testing.assert_allclose(expected, actual.numpy()) mod = runtime.load_module(path_dso) exe = runtime.vm.Executable(mod) exe.load_late_bound_consts(path_consts) x_data = np.random.rand(1000, 1000).astype("float32") the_vm = runtime.vm.VirtualMachine(exe, dev) actual = the_vm.invoke("main", x_data) expected = x_data + const_data tvm.testing.assert_allclose(expected, actual.numpy()) def test_load_late_bound_consts_with_no_late_bound_consts(): """Check that load_late_bound_consts handles a model with no late bound consts.""" target = tvm.target.Target("llvm") dev = tvm.cpu() const_data = np.random.rand(1).astype("float64") x = relay.var("x", shape=(1,), dtype="float64") const = relay.const(const_data, dtype="float64") func = relay.Function([x], relay.op.add(x, const)) mod = tvm.IRModule.from_expr(func) vm_exec = vm.compile(mod, target=target) temp = utils.tempdir() path_consts = temp.relpath("consts") path_dso = temp.relpath("lib.so") byte_limit = len(const_data.tobytes()) + 1 vm_exec.move_late_bound_consts(path_consts, byte_limit=byte_limit) vm_exec.mod.export_library(path_dso) mod = runtime.load_module(path_dso) mod["load_late_bound_consts"](path_co
nsts) x_data = np.random.rand(1).astype("float64") loaded_vm = runtime.vm.VirtualMachine(mod, dev) actual = loaded_vm.invoke("main", x_data) expected = x_data + const_data tvm.testing.assert_allclose(expected, actual.numpy()) def test_vm_save_and_load_without_designating_late_bound_consts(): """Check that a VM can be saved and loaded without late-bound consts in play. Specifically, this test ensures that the machinery behind late-bound const loading does not assume the need to load late-bound consts (and cause an error) when the user did not choose to designate any consts as such. """ target = tvm.target.Target("llvm") dev = tvm.cpu() const_data = np.random.rand(1).astype("float64") x = relay.var("x", shape=(1,), dtype="float64") const = relay.const(const_data, dtype="float64") func = relay.Function([x], relay.op.add(x, const)) mod = tvm.IRModule.from_expr(func) vm_exec = vm.compile(mod, target=target) code, lib = vm_exec.save() exe = runtime.vm.Executable.load_exec(code, lib) x_data = np.random.rand(1).astype("float64") loaded_vm = runtime.vm.VirtualMachine(exe, dev) actual = loaded_vm.invoke("main", x_data) expected = x_data + const_data tvm.testing.assert_allclose(expected, actual.numpy()) def test_load_and_save_constants_via_map(): """Large constants can be serialized outside of executable""" target = tvm.target.Target("llvm") dev = tvm.cpu() x = relay.var("x", shape=(1000, 1000)) const_data = np.random.rand(1000, 1000).astype("float32") const = relay.const(const_data, dtype="float32") func = relay.Function([x], relay.op.add(x, const)) mod = tvm.IRModule.from_expr(func) vm_exec = vm.compile(mod, target=target) consts_map = vm_exec.get_late_bound_consts(byte_limit=256) temp = utils.tempdir() path_dso = temp.relpath("lib.so") vm_exec.mod.export_library(path_dso) mod = runtime.load_module(path_dso) mod["load_late_bound_
consts_from_map"](consts_map) x_data = np.random.rand(1000, 1000).astype("float32") the_vm = runtime.vm.VirtualMachine(mod, dev) actual = the_vm.invoke("main", x_data) expected = x_data + const_data tvm.testing.assert_allclose(expected, actual.numpy()) mod = runtime.load_module(path_dso) exe = runtime.vm.Executable(mod) exe.load_late_bound_consts_from_map(consts_map) x_data = np.random.rand(1000, 1000).astype("float32") the_vm = runtime.vm.VirtualMachine(exe, dev) actual = the_vm.invoke("main", x_data) expected = x_data + const_data tvm.testing.assert_allclose(expected, actual.numpy()) def test_load_late_bound_consts_via_map_with_no_late_bound_consts(): """Check that load_late_bound_consts handles a model with no late bound consts.""" target = tvm.target.Target("llvm") dev = tvm.cpu() const_data = np.random.rand(1).astype("float64") x = relay.var("x", shape=(1,), dtype="float64") const = relay.const(const_data, dtype="float64") func = relay.Function([x], relay.op.add(x, const)) mod = tvm.IRModule.from_expr(func) vm_exec = vm.compile(mod, target=target) temp = utils.tempdir() path_dso = temp.relpath("lib.so") byte_limit = len(const_data.tobytes()) + 1 consts_map = vm_exec.get_late_bound_consts(byte_limit=byte_limit) vm_exec.mod.export_library(path_dso) mod = runtime.load_module(path_dso) mod["load_late_bound_consts_from_map"](consts_map) x_data = np.random.rand(1).astype("float64") loaded_vm = runtime.vm.VirtualMachine(mod, dev) actual = loaded_vm.invoke("main", x_data) expected = x_data + const_data tvm.testing.assert_allclose(expected, actual.numpy()) if __name__ == "__main__": tvm.testing.main()
"""Unit tests for the Relay VM serialization and deserialization."""
import pytest
import numpy as np

import tvm
import tvm.testing
from tvm.runtime import vm as _vm
from tvm.relay import vm as rly_vm
from tvm import relay
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay import transform
from tvm.relay.prelude import Prelude
from tvm.contrib import utils
from tvm.relay import testing


def create_exec(f, target="llvm", params=None):
    """Compile a Relay expression or IRModule into a VM executable."""
    if isinstance(f, relay.Expr):
        mod = tvm.IRModule()
        mod["main"] = f
        executable = rly_vm.compile(mod, target=target, params=params)
        return executable
    else:
        assert isinstance(f, tvm.IRModule), "expected mod as tvm.IRModule"
        executable = rly_vm.compile(f, target=target, params=params)
        return executable


def get_serialized_output(mod, *data, params=None, target="llvm", device=tvm.cpu()):
    """Compile, serialize, reload, and run `mod`, returning the deserialized VM's output."""
    exe = create_exec(mod, target, params=params)
    code, lib = exe.save()
    des_exec = _vm.Executable.load_exec(code, lib)
    des_vm = _vm.VirtualMachine(des_exec, device)
    result = des_vm.run(*data)
    return result


def run_network(mod, params, dtype="float32"):
    """Compare the VM executor's output against the output after a serialize/deserialize round trip."""

    def get_vm_output(mod, data, params, target, device, dtype="float32"):
        result = relay.create_executor("vm", mod=mod, device=device).evaluate()(data, **params)
        return result.numpy().astype(dtype)

    data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
    data = np.random.uniform(size=data_shape).astype(dtype)
    target = "llvm"
    dev = tvm.cpu(0)
    tvm_out = get_vm_output(mod, tvm.nd.array(data.astype(dtype)), params, target, dev, dtype)
    vm_out = get_serialized_output(
        mod, tvm.nd.array(data.astype(dtype)), params=params, target=target, device=dev
    )
    tvm.testing.assert_allclose(vm_out.numpy().astype(dtype), tvm_out, rtol=1e-5, atol=1e-5)


def test_serializer():
    mod = tvm.IRModule({})
    a = relay.const(1.0, "float32")
    x = relay.var("x", shape=(10, 10), dtype="float32")
    f1 = relay.Function([x], x + a)
    glb_f1 = relay.GlobalVar("f1")
    mod[glb_f1] = f1
    mod = transform.InferType()(mod)

    b = relay.const(2.0, "float32")
    y = relay.var("y", shape=(10, 10), dtype="float32")
    f2 = relay.Function([y], y - b)
    glb_f2 = relay.GlobalVar("f2")
    mod[glb_f2] = f2
    mod = transform.InferType()(mod)

    x1 = relay.var("x1", shape=(1
0, 10), dtype="float32") y1 = relay.var("y1", shape=(10, 10), dtype="float32") main = relay.Function([x1, y1], glb_f1(x1) * glb_f2(y1)) mod["main"] = main exe = create_exec(mod) glbs = exe.globals assert len(glbs) == 3 assert "f1" in glbs assert "f2" in glbs assert "main" in glbs prim_ops = exe.primitive_ops assert any(item.startswith("vm_mod_fused_add") for item in prim_ops) assert any(item.startswith("vm_mod_fused_subtract") for item in prim_ops) assert any(item.startswith("vm_mod_fused_multiply") for item in prim_ops) code = exe.bytecode assert "main(x1, y1)" in code assert "f1(x)" in code assert "f2(y)" in code code, lib = exe.save() assert isinstance(code, bytearray) assert isinstance(lib, tvm.runtime.Module) def test_save_load(): x = relay.var("x", shape=(10, 10)) f = relay.Function([x], x + x) x_data = np.random.rand(10, 10).astype("float32") vm = create_exec(f) code, lib = vm.save() assert isinstance(code, bytearray) tmp = utils.tempdir() path_lib = tmp.relpath("lib.so") lib.export_library(path_lib) with open(tmp.relpath("code.ro"), "wb") as fo: fo.write(code) loaded_lib = tvm.runtime.load_module(path_lib) loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read()) des_exec = _vm.Executable.load_exec(loaded_code, loaded_lib) des_vm = _vm.VirtualMachine(des_exec, tvm.cpu()) res = des_vm.run(x_data) tvm.testing.assert_allclose(res.numpy(), x_data + x_data) def test_const(): c = relay.const(1.0, "float32") x = relay.var("x", shape=(10, 10), dtype="float32") f = relay.Function([x], x + c) x_data = np.random.rand(10, 10).astype("float32") res = get_serialized_output(f, x_data) tvm.testing.assert_allclose(res.numpy(), x_data + 1) def test_if(): x = relay.var("x", shape=(10, 10)) y = relay.var("y", shape=(10, 10)) equal = relay.op.equal(x, y) equal = relay.op.nn.batch_flatten(equal)
f = relay.Function([x, y], relay.If(relay.op.min(equal, axis=[0, 1]), x, y)) x_data = np.random.rand(10, 10).astype("float32") y_data = np.random.rand(10, 10).astype("float32") res = get_serialized_output(f, x_data, x_data) tvm.testing.assert_allclose(res.numpy(), x_data) res = get_serialized_output(f, x_data, y_data) tvm.testing.assert_allclose(res.numpy(), y_data) def test_loop(): mod = tvm.IRModule({}) sum_up = relay.GlobalVar("sum_up") i = relay.var("i", shape=[], dtype="int32") accum = relay.var("accum", shape=[], dtype="int32") sb = ScopeBuilder() with sb.if_scope(relay.equal(i, relay.const(0, "int32"))): sb.ret(accum) with sb.else_scope(): one_less = relay.subtract(i, relay.const(1, "int32")) new_accum = relay.add(accum, i) sb.ret(relay.Call(sum_up, [one_less, new_accum])) func = relay.Function([i, accum], sb.get()) mod[sum_up] = func mod = transform.InferType()(mod) loop_bound = 0 i_data = np.array(loop_bound, dtype="int32") accum_data = np.array(0, dtype="int32") iarg = relay.var("i", shape=[], dtype="int32") aarg = relay.var("accum", shape=[], dtype="int32") mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg)) result = get_serialized_output(mod, i_data, accum_data) tvm.testing.assert_allclose(result.numpy(), sum(range(1, loop_bound + 1))) def test_tuple(): ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))]) tup = relay.var("tup", type_annotation=ttype) f = relay.Function([tup], relay.TupleGetItem(tup, 1)) i_data = np.random.rand(41).astype("float32") j_data = np.random.rand(10).astype("float32") result = get_serialized_output(f, (i_data, j_data)) tvm.testing.assert_allclose(result.numpy(), j_data) def test_adt_list(): mod = tvm.IRModule() p = Prelude(mod) _, cons, nil = mod.get_type("List") l1 = cons(relay.const(1), nil()) l21 = cons(relay.const(2), l1) l321 = cons(relay.co
nst(3), l21) f = relay.Function([], l321) mod["main"] = f result = get_serialized_output(mod) assert len(result) == 2 assert len(result[1]) == 2 assert len(result[1][1]) == 2 res = [] res.append(result[0].numpy().tolist()) res.append(result[1][0].numpy().tolist()) res.append(result[1][1][0].numpy().tolist()) tvm.testing.assert_allclose(res, np.array([3, 2, 1])) def test_adt_compose(): mod = tvm.IRModule() p = Prelude(mod) compose = mod.get_global_var("compose") sb = relay.ScopeBuilder() x = relay.var("x", "float32") x1 = sb.let("x1", x) xplusone = x1 + relay.const(1.0, "float32") sb.ret(xplusone) body = sb.get() add_one = relay.GlobalVar("add_one") add_one_func = relay.Function([x], body) sb = relay.ScopeBuilder() y = relay.var("y", "float32") add_two_func = sb.let("add_two", compose(add_one_func, add_one_func)) add_two_res = add_two_func(y) sb.ret(add_two_res) add_two_body = sb.get() mod[add_one] = add_one_func f = relay.Function([y], add_two_body) mod["main"] = f x_data = np.array(np.random.rand()).astype("float32") result = get_serialized_output(mod, x_data) tvm.testing.assert_allclose(result.numpy(), x_data + 2.0) def test_closure(): x = relay.var("x", shape=()) y = relay.var("y", shape=()) f = relay.Function([x], x + y) ff = relay.Function([y], f) clo = ff(relay.const(1.0)) main = clo(relay.const(2.0)) res = get_serialized_output(main) tvm.testing.assert_allclose(res.numpy(), 3.0) def test_synthetic(): mod, params = testing.synthetic.get_workload() run_network(mod, params) def test_mobilenet(): mod, params = testing.mobilenet.get_workload(batch_size=1) run_network(mod, params) def test_vm_shape_of(): x = relay.var("x", shape=(relay.Any(), relay.Any(), relay.Any()), dtype="float32") relu_x = relay.nn.relu(x) data = np.random.uniform(size=(2, 3, 4)).astype("float32") args = [data]
newshape_var = relay.var("newshape", shape=(2,), dtype="int64") args.append(np.array((1, -1), dtype="int64")) main = relay.Function([x, newshape_var], relay.reshape(relu_x, newshape=newshape_var)) res = get_serialized_output(main, *args).numpy() tvm.testing.assert_allclose(res.flatten(), data.flatten()) def test_dynamic_bcast(): dtype = "float32" x = relay.var("x", shape=(relay.Any(), 2), dtype=dtype) y = relay.var("y", shape=(3, 2), dtype=dtype) mod = tvm.IRModule() mod["main"] = relay.Function([x, y], relay.add(x, y)) x_data = np.random.uniform(size=(1, 2)).astype(dtype) y_data = np.random.uniform(size=(3, 2)).astype(dtype) res_np = np.add(x_data, y_data) for target, dev in testing.enabled_targets(): res = get_serialized_output(mod, *(x_data, y_data), target=target, device=dev) tvm.testing.assert_allclose(res.numpy(), res_np) if __name__ == "__main__": pytest.main([__file__])
"""Unit tests for the CapturePostDfsIndexInSpans debugging pass."""
import tvm
import tvm.testing
import numpy as np def make_const(dtype, shape): return tvm.relay.const(np.random.rand(*shape).astype(dtype)) def make_consts(dtype, shapes): return [make_const(dtype, shape) for shape in shapes] metatable = { "relay.Constant": make_consts( "float16", [ (2304, 768), (2304,), (600, 32, 64), ], ) } def input_mod(): return tvm.parser.parse( """ def @main(%x0 : Tensor[(1600, 768), float16], %x3 : Tensor[(600, 32, 64), float16]) -> (Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) { %0 = nn.dense(%x0, meta[relay.Constant][0], units=2304); %1 = add(%0, meta[relay.Constant][1]); %2 = fn(%y_3_i0: Tensor[(600, 32, 64), float16], %y_3_i1: Tensor[(600, 32, 64), float16], Inline=1, Compiler="cublas", global_symbol="tvmgen_default_cublas_main_3", Primitive=1) -> Tensor[(600, 32, 32), float16] { %6 = fn (%FunctionVar_0_01: Tensor[(600, 32, 64), float16], %FunctionVar_0_11: Tensor[(600, 32, 64), float16], PartitionedFromPattern="nn.batch_matmul_", Composite="cublas.batch_matmul") -> Tensor[(600, 32, 32), float16] { nn.batch_matmul(%FunctionVar_0_01, %FunctionVar_0_11, out_dtype="float16", transpose_b=True) }; %6(%y_3_i0, %y_3_i1) }; %3 = %2(%x3, meta[relay.Constant][2]); (%1, %3) } """, "from_string", None, metatable, ) expected_pretty_printed_output_mod = r"""def @main(%x0: Tensor[(1600, 768), float16] /* ty=Tensor[(1600, 768), float16] span=index:0:5 */, %x3: Tensor[(600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:1:18 */) -> (Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) { %0 = nn.dense(%x0, meta[relay.Constant][0] /* ty=Tensor[(2304, 768), float16] span=index:4:5 */, units=2304) /* ty=Tensor[(1600, 2304), float16] span=index:5:7 */; %2 = fn (%y_3_i0: Tensor[(
600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:8:15 */, %y_3_i1: Tensor[(600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:9:15 */, Inline=1, Compiler="cublas", global_symbol="tvmgen_default_cublas_main_3", Primitive=1) -> Tensor[(600, 32, 32), float16] { %1 = fn (%FunctionVar_0_01: Tensor[(600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:10:13 */, %FunctionVar_0_11: Tensor[(600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:11:13 */, PartitionedFromPattern="nn.batch_matmul_", Composite="cublas.batch_matmul") -> Tensor[(600, 32, 32), float16] { nn.batch_matmul(%FunctionVar_0_01, %FunctionVar_0_11, out_dtype="float16", transpose_b=True) /* ty=Tensor[(600, 32, 32), float16] span=index:13:14 */ } /* ty=fn (Tensor[(600, 32, 64), float16], Tensor[(600, 32, 64), float16]) -> Tensor[(600, 32, 32), float16] span=index:14:15 */; %1(%y_3_i0, %y_3_i1) /* ty=Tensor[(600, 32, 32), float16] span=index:15:16 */ } /* ty=fn (Tensor[(600, 32, 64), float16], Tensor[(600, 32, 64), float16]) -> Tensor[(600, 32, 32), float16] span=index:16:18 */; %3 = add(%0, meta[relay.Constant][1] /* ty=Tensor[(2304), float16] span=index:6:7 */) /* ty=Tensor[(1600, 2304), float16] span=index:7:19 */; %4 = %2(%x3, meta[relay.Constant][2] /* ty=Tensor[(600, 32, 64), float16] span=index:17:18 */) /* ty=Tensor[(600, 32, 32), float16] span=index:18:19 */; (%3, %4) /* ty=(Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) span=index:19:20 */ } """ def test_capture_index_in_spans(): output_mod = str(tvm.relay.transform.CapturePostDfsIndexInSpans()(input_mod())) assert output_mod == expected_pretty_printed_output_mod if __name__ == "__main__": tvm.testing.main()
"""Unit tests for the OutlineCompilerFunctionsWithExistingGlobalSymbols and MarkCompilerFunctionsAsExtern external codegen helper passes."""
import tvm
import tvm.testing
import numpy as np def make_const(dtype, shape): return tvm.relay.const(np.random.rand(*shape).astype(dtype)) def make_consts(dtype, shapes): return [make_const(dtype, shape) for shape in shapes] metatable = { "relay.Constant": make_consts( "float16", [ (2304, 768), (2304,), (600, 32, 64), ], ) } def original_mod(): return tvm.parser.parse( """ def @main(%x0 : Tensor[(1600, 768), float16], %x3 : Tensor[(600, 32, 64), float16]) -> (Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) { %0 = fn(%y_0_i0: Tensor[(1600, 768), float16], %y_0_i1: Tensor[(2304, 768), float16], %y_0_i2: Tensor[(2304), float16], Inline=1, Compiler="cutlass", global_symbol="tvmgen_default_cutlass_main_0", Primitive=1) -> Tensor[(1600, 2304), float16] { %4 = fn (%FunctionVar_0_0: Tensor[(1600, 768), float16], %FunctionVar_0_1: Tensor[(2304, 768), float16], %FunctionVar_0_2: Tensor[(2304), float16], PartitionedFromPattern="nn.dense_add_", Composite="cutlass.dense_bias") -> Tensor[(1600, 2304), float16] { %5 = nn.dense(%FunctionVar_0_0, %FunctionVar_0_1, units=2304); add(%5, %FunctionVar_0_2) }; %4(%y_0_i0, %y_0_i1, %y_0_i2) }; %1 = %0(%x0, meta[relay.Constant][0], meta[relay.Constant][1]); %2 = fn(%y_3_i0: Tensor[(600, 32, 64), float16], %y_3_i1: Tensor[(600, 32, 64), float16], Inline=1, Compiler="cublas", global_symbol="tvmgen_default_cublas_main_3", Primitive=1) -> Tensor[(600, 32, 32), float16] { %6 = fn (%FunctionVar_0_01: Tensor[(600, 32, 64), float16], %FunctionVar_0_11: Tensor[(600, 32, 64), float16], PartitionedFromPattern="nn.batch_matmul_", Composite="cublas.batch_matmul") -> Tensor[(600, 32, 32), float16] { nn.batch_matmul(%FunctionVar_0_01, %FunctionVar_0_11, out_dtype="float16", transpose_b=True)
}; %6(%y_3_i0, %y_3_i1) }; %3 = %2(%x3, meta[relay.Constant][2]); (%1, %3) } """, "from_string", None, metatable, ) def original_mod_let_bound(): return tvm.parser.parse( """ def @main(%x0 : Tensor[(1600, 768), float16], %x3 : Tensor[(600, 32, 64), float16]) -> (Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) { let %f = fn(%y_0_i0: Tensor[(1600, 768), float16], %y_0_i1: Tensor[(2304, 768), float16], %y_0_i2: Tensor[(2304), float16], Inline=1, Compiler="cutlass", global_symbol="tvmgen_default_cutlass_main_0", Primitive=1) -> Tensor[(1600, 2304), float16] { %4 = fn (%FunctionVar_0_0: Tensor[(1600, 768), float16], %FunctionVar_0_1: Tensor[(2304, 768), float16], %FunctionVar_0_2: Tensor[(2304), float16], PartitionedFromPattern="nn.dense_add_", Composite="cutlass.dense_bias") -> Tensor[(1600, 2304), float16] { %5 = nn.dense(%FunctionVar_0_0, %FunctionVar_0_1, units=2304); add(%5, %FunctionVar_0_2) }; %4(%y_0_i0, %y_0_i1, %y_0_i2) }; %1 = %f(%x0, meta[relay.Constant][0], meta[relay.Constant][1]); %2 = fn(%y_3_i0: Tensor[(600, 32, 64), float16], %y_3_i1: Tensor[(600, 32, 64), float16], Inline=1, Compiler="cublas", global_symbol="tvmgen_default_cublas_main_3", Primitive=1) -> Tensor[(600, 32, 32), float16] { %6 = fn (%FunctionVar_0_01: Tensor[(600, 32, 64), float16], %FunctionVar_0_11: Tensor[(600, 32, 64), float16], PartitionedFromPattern="nn.batch_matmul_", Composite="cublas.batch_matmul") -> Tensor[(600, 32, 32), float16] { nn.batch_matmul(%FunctionVar_0_01, %FunctionVar_0_11, out_dtype="float16", transpose_b=True) }; %6(%y_3_i0, %y_3_i1) }; %3 = %2(%x3, meta[relay.Constant][2]); (%1, %3) } """,
"from_string", None, metatable, ) def expected_outlined_mod(): return tvm.parser.parse( """ def @main(%x0 : Tensor[(1600, 768), float16], %x3 : Tensor[(600, 32, 64), float16]) -> (Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) { %1 = @tvmgen_default_cutlass_main_0(%x0, meta[relay.Constant][0], meta[relay.Constant][1]); %2 = fn(%y_3_i0: Tensor[(600, 32, 64), float16], %y_3_i1: Tensor[(600, 32, 64), float16], Inline=1, Compiler="cublas", global_symbol="tvmgen_default_cublas_main_3", Primitive=1) -> Tensor[(600, 32, 32), float16] { %6 = fn (%FunctionVar_0_01: Tensor[(600, 32, 64), float16], %FunctionVar_0_11: Tensor[(600, 32, 64), float16], PartitionedFromPattern="nn.batch_matmul_", Composite="cublas.batch_matmul") -> Tensor[(600, 32, 32), float16] { nn.batch_matmul(%FunctionVar_0_01, %FunctionVar_0_11, out_dtype="float16", transpose_b=True) }; %6(%y_3_i0, %y_3_i1) }; %3 = %2(%x3, meta[relay.Constant][2]); (%1, %3) } def @tvmgen_default_cutlass_main_0(%y_0_i0: Tensor[(1600, 768), float16], %y_0_i1: Tensor[(2304, 768), float16], %y_0_i2: Tensor[(2304), float16], Inline=1, Compiler="cutlass", global_symbol="tvmgen_default_cutlass_main_0", Primitive=1) -> Tensor[(1600, 2304), float16] { %4 = fn (%FunctionVar_0_0: Tensor[(1600, 768), float16], %FunctionVar_0_1: Tensor[(2304, 768), float16], %FunctionVar_0_2: Tensor[(2304), float16], PartitionedFromPattern="nn.dense_add_", Composite="cutlass.dense_bias") -> Tensor[(1600, 2304), float16] { %5 = nn.dense(%FunctionVar_0_0, %FunctionVar_0_1, units=2304); add(%5, %FunctionVar_0_2) }; %4(%y_0_i0, %y_0_i1, %y_0_i2) } """, "from_string", None, metatable, ) def expected_extern_mod(): return tvm.parser.parse( """
def @main(%x0 : Tensor[(1600, 768), float16], %x3 : Tensor[(600, 32, 64), float16]) -> (Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) { %1 = @tvmgen_default_cutlass_main_0(%x0, meta[relay.Constant][0], meta[relay.Constant][1]); %2 = fn(%y_3_i0: Tensor[(600, 32, 64), float16], %y_3_i1: Tensor[(600, 32, 64), float16], Inline=1, Compiler="cublas", global_symbol="tvmgen_default_cublas_main_3", Primitive=1) -> Tensor[(600, 32, 32), float16] { %6 = fn (%FunctionVar_0_01: Tensor[(600, 32, 64), float16], %FunctionVar_0_11: Tensor[(600, 32, 64), float16], PartitionedFromPattern="nn.batch_matmul_", Composite="cublas.batch_matmul") -> Tensor[(600, 32, 32), float16] { nn.batch_matmul(%FunctionVar_0_01, %FunctionVar_0_11, out_dtype="float16", transpose_b=True) }; %6(%y_3_i0, %y_3_i1) }; %3 = %2(%x3, meta[relay.Constant][2]); (%1, %3) } def @tvmgen_default_cutlass_main_0(%y_0_i0: Tensor[(1600, 768), float16], %y_0_i1: Tensor[(2304, 768), float16], %y_0_i2: Tensor[(2304), float16], Extern=1) -> Tensor[(1600, 2304), float16] { %4 = fn (%FunctionVar_0_0: Tensor[(1600, 768), float16], %FunctionVar_0_1: Tensor[(2304, 768), float16], %FunctionVar_0_2: Tensor[(2304), float16], PartitionedFromPattern="nn.dense_add_", Composite="cutlass.dense_bias") -> Tensor[(1600, 2304), float16] { %5 = nn.dense(%FunctionVar_0_0, %FunctionVar_0_1, units=2304); add(%5, %FunctionVar_0_2) }; %4(%y_0_i0, %y_0_i1, %y_0_i2) } """, "from_string", None, metatable, ) def expected_inlined_mod(): return tvm.parser.parse( """ def @main(%x0 : Tensor[(1600, 768), float16], %x3 : Tensor[(600, 32, 64), float16]) -> (Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) { %0 = nn.dense(%x0, meta[rel
ay.Constant][0], units=2304); %1 = add(%0, meta[relay.Constant][1]); %2 = fn(%y_3_i0: Tensor[(600, 32, 64), float16], %y_3_i1: Tensor[(600, 32, 64), float16], Inline=1, Compiler="cublas", global_symbol="tvmgen_default_cublas_main_3", Primitive=1) -> Tensor[(600, 32, 32), float16] { %6 = fn (%FunctionVar_0_01: Tensor[(600, 32, 64), float16], %FunctionVar_0_11: Tensor[(600, 32, 64), float16], PartitionedFromPattern="nn.batch_matmul_", Composite="cublas.batch_matmul") -> Tensor[(600, 32, 32), float16] { nn.batch_matmul(%FunctionVar_0_01, %FunctionVar_0_11, out_dtype="float16", transpose_b=True) }; %6(%y_3_i0, %y_3_i1) }; %3 = %2(%x3, meta[relay.Constant][2]); (%1, %3) } """, "from_string", None, metatable, ) def test_outline_compiler_functions_with_existing_global_symbols(): actual_outlined_mod = tvm.relay.transform.OutlineCompilerFunctionsWithExistingGlobalSymbols( "cutlass" )(original_mod()) tvm.ir.assert_structural_equal(actual_outlined_mod, expected_outlined_mod(), map_free_vars=True) def test_outline_let_bound_compiler_functions_with_existing_global_symbols(): actual_outlined_mod = tvm.relay.transform.OutlineCompilerFunctionsWithExistingGlobalSymbols( "cutlass" )(original_mod_let_bound()) tvm.ir.assert_structural_equal(actual_outlined_mod, expected_outlined_mod(), map_free_vars=True) def test_mark_compiler_functions_as_extern(): actual_extern_mod = tvm.relay.transform.MarkCompilerFunctionsAsExtern("cutlass")( expected_outlined_mod() ) tvm.ir.assert_structural_equal(actual_extern_mod, expected_extern_mod(), map_free_vars=True) def test_inline_compiler_functions(): mod = expected_outlined_mod() gv = mod.get_global_var("tvmgen_default_cutlass_main_0") actual_inlined_mod = tvm.relay.transform.InlineCompilerFunctionsBoundTo([gv])(mod) tvm.ir.assert_struc
tural_equal(actual_inlined_mod, expected_inlined_mod(), map_free_vars=True) if __name__ == "__main__": tvm.testing.main()
import tvm
from tvm import register_func, get_global_func, IRModule
from tvm import relay
from tvm.parser import SpanCheck
from tvm.relay.transform import AnnotateSpans
from tvm.runtime import Object
from tvm.ir.diagnostics import get_renderer, override_renderer
from tvm.error import DiagnosticError

DEFAULT_RENDERER = get_renderer()

__TESTING__ = None


def testing_renderer(diag_ctx):
    global __TESTING__
    if __TESTING__ and __TESTING__.mirror:
        DEFAULT_RENDERER.render(diag_ctx)

    if __TESTING__:
        __TESTING__._render(diag_ctx)


class DiagnosticTesting:
    def __init__(self, mirror=False):
        self.mirror = mirror
        self.messages = []

    def __enter__(self):
        global __TESTING__
        __TESTING__ = self
        override_renderer(testing_renderer)
        return self

    def __exit__(self, type, value, traceback):
        global __TESTING__
        __TESTING__ = None
        override_renderer(None)
        if type is DiagnosticError and self.matches:
            return True

    def assert_message(self, in_message):
        self.messages.append(in_message)

    def _render(self, diag_ctx):
        self.matches = False
        for diagnostic in diag_ctx.diagnostics:
            message = diagnostic.message
            for partial_msg in self.messages:
                if partial_msg in message:
                    self.matches = True
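

# Hypothetical usage sketch (not part of the original module): DiagnosticTesting is a
# context manager for code that is expected to raise DiagnosticError. Fragments
# registered with assert_message must appear in at least one rendered diagnostic; if
# any fragment matches, the DiagnosticError raised inside the block is suppressed,
# otherwise it propagates. The Relay snippet and the expected fragment below are
# illustrative assumptions, not taken from the original tests.
def example_diagnostic_testing_usage():
    with DiagnosticTesting() as diag:
        diag.assert_message("expected")  # fragment assumed to appear in the parse error
        # Malformed Relay text reports a diagnostic and raises DiagnosticError, which
        # the context manager suppresses when the fragment above matches.
        tvm.parser.parse("def @main( {")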
"""Utilities for testing external code generation"""
import os
import sys
import pytest
import tvm
from tvm import relay, runtime, testing
from tvm.contrib import utils

skip_windows = pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
skip_micro = pytest.mark.skipif(
    tvm.support.libinfo().get("USE_MICRO", "OFF") != "ON",
    reason="MicroTVM support not enabled. Set USE_MICRO=ON in config.cmake to enable.",
)


def parametrize_external_codegen_checks(test):
    """Parametrize over the various check_result functions which are available"""
    return pytest.mark.parametrize(
        "check_result",
        [
            pytest.param(check_aot_executor_result, marks=[skip_windows, skip_micro]),
            pytest.param(check_graph_executor_result, marks=[skip_windows]),
            pytest.param(check_vm_result, marks=[skip_windows]),
        ],
    )(test)


def parametrize_external_json_codegen_checks(test):
    """Parametrize over the various check_result functions which are available for JSON"""
    return pytest.mark.parametrize(
        "check_result",
        [
            pytest.param(check_graph_executor_result, marks=[skip_windows]),
            pytest.param(check_vm_result, marks=[skip_windows]),
        ],
    )(test)


def update_lib(lib):
    test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
    source_dir = os.path.join(test_dir, "..", "..", "..", "..")
    contrib_path = os.path.join(source_dir, "src", "runtime", "contrib")

    kwargs = {}
    kwargs["options"] = ["-O2", "-std=c++17", "-I" + contrib_path]
    tmp_path = utils.tempdir()
    lib_name = "lib.so"
    lib_path = tmp_path.relpath(lib_name)
    lib.export_library(lib_path, fcompile=False, **kwargs)
    lib = tvm.runtime.load_module(lib_path)

    return lib


def check_vm_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()):
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        exe = relay.vm.compile(mod, target=target)
    code, lib = exe.save()
    lib = update_lib(lib)
    exe = runtime.vm.Executable.load_exec(code, lib)
    vm = runtime.vm.VirtualMachine(exe, device)
    out = vm.run(**map_inputs)
    tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)


def check_graph_executor_result(
    mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()
):
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        executor_factory = relay.build(mod, target=target)
    lib = update_lib(executor_factory.lib)
    rt_mod = tvm.contrib.graph_executor.create(executor_factory.graph_json, lib, device)

    for name, data in map_inputs.items():
        rt_mod.set_input(name, data)
    rt_mod.run()
    out = tvm.nd.empty(out_shape, device=device)
    out = rt_mod.get_output(0, out)

    tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)


def check_aot_executor_result(
    mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()
):
    from tvm.testing.aot import AOTTestModel, compile_and_run
    from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER

    interface_api = "packed"
    use_unpacked_api = False
    test_runner = AOT_DEFAULT_RUNNER

    compile_and_run(
        AOTTestModel(module=mod, inputs=map_inputs, outputs={"output": result}),
        test_runner,
        interface_api,
        use_unpacked_api,
    )


def set_external_func_attr(func, compiler, ext_symbol):
    func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Compiler", compiler)
    func = func.with_attr("global_symbol", ext_symbol)
    return func
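

# Hypothetical end-to-end sketch (not part of the original helpers): build a small
# Relay function, mark it for an external codegen with set_external_func_attr, call it
# from the module's entry point, and verify the result with one of the check_* helpers
# above. The "ccompiler" backend, shapes, and variable names are illustrative
# assumptions.
def example_external_codegen_usage(check_result=check_graph_executor_result):
    import numpy as np

    # Function to be offloaded: a single elementwise add.
    x0 = relay.var("x0", shape=(8, 8))
    y0 = relay.var("y0", shape=(8, 8))
    func = relay.Function([x0, y0], relay.add(x0, y0))
    func = set_external_func_attr(func, "ccompiler", "ccompiler_0")

    # Call the external function from @main.
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    mod = tvm.IRModule.from_expr(relay.Call(func, [x, y]))

    x_data = np.random.rand(8, 8).astype("float32")
    y_data = np.random.rand(8, 8).astype("float32")
    check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)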
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np


def gather_nd(data_np, indices_np, batch_dims=0):
    """gather_nd implemented using numpy"""
    data_shape = data_np.shape
    indices_shape = indices_np.shape

    def gather_nd_batch_dims_1_ref(data, indices):
        res = []
        for i, row in enumerate(data):
            indices_tuple = tuple(indices[:, i])  # the indices for the i-th batch
            res.append(row[indices_tuple])
        # stack on the batch dim
        return np.stack(res, 0)

    if batch_dims > 1:
        data_np_reshape = np.reshape(data_np, (-1,) + data_shape[batch_dims:])
        indices_np_reshape = np.reshape(
            indices_np, (indices_shape[0], -1) + indices_shape[(batch_dims + 1) :]
        )
        ref_res = gather_nd_batch_dims_1_ref(data_np_reshape, indices_np_reshape)
        out_shape = indices_shape[1 : (batch_dims + 1)] + ref_res.shape[1:]
        ref_res = np.reshape(ref_res, out_shape)
    elif batch_dims == 1:
        ref_res = gather_nd_batch_dims_1_ref(data_np, indices_np)
    else:
        ref_res = data_np[tuple(indices_np)]

    return ref_res
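

# Hypothetical self-check (not part of the original reference): for batch_dims=0 the
# helper falls back to plain numpy advanced indexing, so it must agree with indexing
# data_np directly. The concrete shapes and index values below are illustrative.
def example_gather_nd_reference_check():
    data_np = np.arange(2 * 3 * 4).reshape(2, 3, 4)
    # Each column of indices_np addresses one output row: (0, 2) and (1, 0).
    indices_np = np.array([[0, 1], [2, 0]])
    out = gather_nd(data_np, indices_np, batch_dims=0)
    expected = np.stack([data_np[0, 2], data_np[1, 0]])
    np.testing.assert_array_equal(out, expected)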
import pytest
import tvm
import tvm.testing
from tvm.topi.arm_cpu.conv2d_int8 import is_int8_hw_support
from tvm.target import codegen

llvm_version, arm_target, input_dtype, kernel_dtype, is_supported = tvm.testing.parameters(
    (8, "c -mcpu=cortex-m4", "int8", "int8", False),
    (8, "c -mcpu=cortex-m7", "int8", "int8", False),
    (8, "c -mcpu=cortex-m33", "int8", "int8", False),
    (8, "c -mcpu=cortex-m55", "int8", "int8", False),
    (8, "c -mcpu=cortex-m3", "int8", "int8", False),
    (7, "llvm -mtriple=arm-linux-gnueabi -mattr=+neon", "int8", "int8", False),
    (8, "llvm -mtriple=arm-linux-gnueabi -mattr=+neon", "int8", "int8", True),
    (9, "llvm -mtriple=arm-linux-gnueabi -mattr=+neon", "int8", "int8", True),
    (8, "llvm -mtriple=arm-linux-gnueabi", "int8", "int8", False),
    (7, "llvm -mtriple=aarch64-linux-gnu -mattr=+v8.4a,+dotprod", "int8", "int8", False),
    (8, "llvm -mtriple=aarch64-linux-gnu -mattr=+v8.4a,+dotprod", "int8", "int8", True),
    (9, "llvm -mtriple=arm-linux-gnueabi -mattr=+neon", "int8", "int8", True),
    (8, "llvm -mtriple=aarch64-linux-gnu", "int8", "int8", True),
    (8, "llvm -mtriple=aarch64-linux-gnu -mattr=+neon", "int16", "int8", False),
    (8, "llvm -mtriple=aarch64-linux-gnu -mattr=+neon", "int8", "int16", False),
    (8, "llvm -mtriple=aarch64-linux-gnu -mattr=+neon", "int16", "int16", False),
)


def test_arm_conv2d_int8_support(
    monkeypatch, llvm_version, arm_target, input_dtype, kernel_dtype, is_supported
):
    """Test ARM conv2d int8 support for different targets.

    Parameters
    ----------
    llvm_version : int
        Major version of the LLVM toolchain reported by the build.
    arm_target : str
        ARM CPU target.
    input_dtype : str
        Conv2d input data type.
    kernel_dtype : str
        Conv2d kernel data type.
    is_supported : bool
        Expected result.
    """
    with tvm.target.Target(arm_target):
        monkeypatch.setattr(codegen, "llvm_version_major", lambda: llvm_version)
        assert is_int8_hw_support(input_dtype, kernel_dtype) == is_supported
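

# Hypothetical standalone sketch (not part of the original test): the same helper can
# be queried directly under a target context outside the pytest parametrization. It
# assumes an LLVM-enabled build, and no specific value is asserted because the answer
# depends on the LLVM version the local TVM build reports.
def example_is_int8_hw_support_query():
    with tvm.target.Target("llvm -mtriple=aarch64-linux-gnu -mattr=+neon"):
        return is_int8_hw_support("int8", "int8")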
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest import tvm import tvm.testing def test_make_virtual_device_for_device(): virtual_device = tvm.target.VirtualDevice(tvm.device("cuda")) assert virtual_device.device_type == 2 # ie kDLCUDA assert virtual_device.virtual_device_id == 0 assert virtual_device.target is None assert virtual_device.memory_scope == "" def test_make_virtual_device_for_device_and_target(): target = tvm.target.Target("cuda") virtual_device = tvm.target.VirtualDevice(tvm.device("cuda"), target) assert virtual_device.device_type == 2 # ie kDLCUDA assert virtual_device.target == target assert virtual_device.memory_scope == "" def test_make_virtual_device_for_device_target_and_memory_scope(): target = tvm.target.Target("cuda") scope = "local" virtual_device = tvm.target.VirtualDevice(tvm.device("cuda"), target, scope) assert virtual_device.device_type == 2 # ie kDLCUDA assert virtual_device.target == target assert virtual_device.memory_scope == scope if __name__ == "__main__": tvm.testing.main()
"""Test retrieving and applying memory scope constraints to PrimFuncs"""
import tvm
import tvm.testing
from tvm import tir
from tvm import relay
from tvm.script import tir as T


@T.prim_func
def gem(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], scope="scopeA")
    B = T.match_buffer(b, [128, 128], scope="scopeA")
    C = T.match_buffer(c, [128, 128], scope="scopeB")
    D = T.match_buffer(d, [128, 128], scope="scopeC")
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                D[vi, vj] = C[vi, vj]
            D[vi, vj] = D[vi, vj] + A[vi, vk] * B[vj, vk]


gem_ty = relay.FuncType(
    [
        relay.TupleType(
            [
                relay.TensorType((128, 128), "float32"),
                relay.TensorType((128, 128), "float32"),
            ]
        ),
        relay.TensorType((128, 128), "float32"),
    ],
    relay.TensorType((128, 128), "float32"),
)


def test_get_prim_func_arg_and_result_constraints():
    scopes = tir.analysis.get_prim_func_arg_and_result_memory_constraints(gem, gem_ty)
    assert [x for x in scopes] == ["scopeA", "scopeB", "scopeC"]


def test_apply_prim_func_arg_and_result_memory_constraints():
    rewritten = tir.analysis.apply_prim_func_arg_and_result_memory_constraints(
        gem, gem_ty, ["scopeX", "scopeY", "scopeZ"]
    )
    scopes = tir.analysis.get_prim_func_arg_and_result_memory_constraints(rewritten, gem_ty)
    assert [x for x in scopes] == ["scopeX", "scopeY", "scopeZ"]


if __name__ == "__main__":
    tvm.testing.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common utility for topi test"""

from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity


class Int8Fallback(autotvm.FallbackContext):
    def _query_inside(self, target, workload):
        key = (target, workload)
        if key in self.memory:
            return self.memory[key]
        cfg = FallbackConfigEntity()
        self.memory[key] = cfg
        cfg.is_fallback = False
        return cfg
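

# Hypothetical usage sketch (not part of the original utility): Int8Fallback is an
# AutoTVM dispatch context, so int8 TOPI schedules compiled inside a
# `with Int8Fallback():` block receive a cached FallbackConfigEntity with is_fallback
# forced to False instead of requiring tuned logs. The direct _query_inside calls
# below just exercise that caching behaviour with an illustrative target string and
# workload key.
def example_int8_fallback_usage():
    ctx = Int8Fallback()
    cfg = ctx._query_inside("llvm", ("hypothetical", "conv2d_int8_workload"))
    assert isinstance(cfg, FallbackConfigEntity)
    assert cfg.is_fallback is False
    # Repeated queries for the same (target, workload) key reuse the cached entity.
    assert ctx._query_inside("llvm", ("hypothetical", "conv2d_int8_workload")) is cfg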
"""Test code for FIFO buffer"""
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
import numpy as np
from tvm.contrib.pickle_memoize
import memoize def verify_fifo_buffer(buffer_shape, data_shape, axis, dtype="float32"): buffer = te.placeholder(buffer_shape, name="buffer", dtype=dtype) data = te.placeholder(data_shape, name="data", dtype=dtype) @memoize("topi.tests.test_fifo_buffer") def get_ref_data(): buffer_np = np.random.uniform(size=buffer_shape).astype(dtype) data_np = np.random.uniform(size=data_shape).astype(dtype) begin = data_np.shape[axis] end = buffer_np.shape[axis] + data_np.shape[axis] ndim = len(buffer_np.shape) ss = tuple((slice(begin, end, 1) if x == axis else slice(None)) for x in range(ndim)) out_np = np.concatenate((buffer_np, data_np), axis=axis)[ss] return (buffer_np, data_np, out_np) buffer_np, data_np, out_np = get_ref_data() def check_device(target, dev): print(" Running on target: {}".format(target)) with tvm.target.Target(target): out = topi.nn.fifo_buffer(data, buffer, axis=axis) s = tvm.topi.testing.get_injective_schedule(target)([out]) buffer_tvm = tvm.nd.array(buffer_np, device=dev) data_tvm = tvm.nd.array(data_np, device=dev) out_tvm = tvm.nd.empty(shape=buffer_shape, device=dev, dtype=dtype) f = tvm.build(s, [data, buffer, out], target, name="fifo") f(data_tvm, buffer_tvm, out_tvm) tvm.testing.assert_allclose(out_tvm.numpy(), out_np) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) def verify_conv1d_integration(): batch_size = 1 num_channel = 1 num_filter = 1 stride = (1, 1) dilate = (1, 1) padding = (0, 0) kernel_size = (1, 3) input_window_size = (1, 10) inc_input_size = (1, 2) context_size = (1, 4) inc_output_size = (1, 2) output_window_size = (1, 8) num_iteration = 20 buffer_axis = 3 kernel_shape = (num_filter, num_channel, kernel_size[0], kernel_size[1]) input_window_shape = (batch_size, n
um_channel, input_window_size[0], input_window_size[1]) inc_input_shape = (batch_size, num_channel, inc_input_size[0], inc_input_size[1]) inc_output_shape = (batch_size, num_filter, inc_output_size[0], inc_output_size[1]) context_shape = (batch_size, num_channel, context_size[0], context_size[1]) output_window_shape = (batch_size, num_filter, output_window_size[0], output_window_size[1]) dtype = "float32" inc_input = te.placeholder(inc_input_shape, name="inc_input", dtype=dtype) input_window = te.placeholder(input_window_shape, name="input_window", dtype=dtype) context = te.placeholder(context_shape, name="context", dtype=dtype) kernel = te.placeholder(kernel_shape, name="kernel", dtype=dtype) inc_output = te.placeholder(inc_input_shape, name="inc_output", dtype=dtype) output_window = te.placeholder(output_window_shape, name="output_window", dtype=dtype) @memoize("topi.tests.test_fifo_buffer_conv1d_integration") def get_data(): inc_input_np = np.random.uniform( size=tuple([num_iteration] + list(inc_input_shape)) ).astype(dtype) input_window_np = np.zeros(input_window_shape, dtype=dtype) kernel_np = np.random.uniform(size=kernel_shape).astype(dtype) context_np = np.zeros(context_shape, dtype=dtype) output_window_np = np.zeros(output_window_shape, dtype=dtype) return (inc_input_np, input_window_np, kernel_np, context_np, output_window_np) inc_input_np, input_window_np, kernel_np, context_np, output_window_np = get_data() def check_device(target, dev): print(" Running on target: {}".format(target)) conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(target) with tvm.target.Target(target): out = topi.nn.fifo_buffer(inc_input, context, axis=buffer_axis) s = tvm.topi.testing.get_injective_schedule(target)([out]) update_context = tvm.build(s, [inc_input, context, out], ta