import logging

import numpy as np
import pytest

import tvm
import tvm.testing
from tvm import relay, IRModule
from utils.external_codegen import (
    parametrize_external_codegen_checks,
    set_external_func_attr,
    check_aot_executor_result,
    check_graph_executor_result,
    check_vm_result,
)
logging.basicConfig(level=logging.INFO)
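# Note: parametrize_external_codegen_checks (from utils.external_codegen) is assumed
# to parametrize `check_result` over the available executor checks imported above
# (graph, AOT, VM), so each decorated test runs once per executor kind.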
@parametrize_external_codegen_checks
def test_tir_external_generation_inline_without_target_instance(check_result):
shape = (8,)
x_data = np.random.randint(255, size=shape).astype("float32")
y_data = np.random.randint(255, size=shape).astype("float32")
inputs = {"x": x_data, "y": y_data}
x0 = relay.var("x0", shape=shape, dtype="float32")
y0 = relay.var("y0", shape=shape, dtype="float32")
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "example_target_hook", "replace_add_with_subtract")
x = relay.var("x", shape=(8,), dtype="float32")
y = relay.var("y", shape=(8,), dtype="float32")
call = relay.Call(f, [x, y])
func = IRModule.from_expr(call)
check_result(func, inputs, (8,), x_data - y_data)
@pytest.mark.parametrize("check_result", [check_graph_executor_result, check_vm_result])
def test_tir_external_generation_outline_with_target_instance(check_result):
shape = (8,)
x_data = np.random.randint(255, size=shape).astype("float32")
y_data = np.random.randint(255, size=shape).astype("float32")
inputs = {"x": x_data, "y": y_data}
host_target = tvm.target.Target("llvm")
generic_target = tvm.target.Target("llvm", host=host_target)
extern_codegen_target = tvm.target.Target(
"example_target_hook -example_attribute=42", host=host_target
)
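# The -example_attribute=42 target attribute is consumed by the example_target_hook
# codegen; judging by the expected output below, the generated kernel computes
# x - y - example_attribute, so the whole module yields (x - y - 42) * 2.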
mod = tvm.parser.fromtext(
"""
def @main(%x: Tensor[(8), float32], %y: Tensor[(8), float32]) -> Tensor[(8), float32] {
@replace_add_with_subtract(%x, %y) * 2.0f
}
def @replace_add_with_subtract(%x: Tensor[(8), float32], %y: Tensor[(8), float32],
Inline=1,
Primitive=1,
Compiler="example_target_hook",
global_symbol="replace_add_with_subtract") -> Tensor[(8), float32] {
%x + %y
}
"""
)
check_result(
mod,
inputs,
(8,),
(x_data - y_data - 42.0) * 2.0,
target=[generic_target, extern_codegen_target],
)
@pytest.mark.parametrize("check_result", [check_aot_executor_result, check_graph_executor_result])
def test_runtime_module_generation(check_result):
shape = (8,)
x_data = np.random.randint(255, size=shape).astype("float32")
y_data = np.random.randint(255, size=shape).astype("float32")
inputs = {"x": x_data, "y": y_data}
x0 = relay.var("x0", shape=shape, dtype="float32")
y0 = relay.var("y0", shape=shape, dtype="float32")
z = x0 + y0
func = relay.Function([x0, y0], z)
func = set_external_func_attr(func, "example_target_hook", "replace_add_with_subtract")
func = func.with_attr("tir_to_runtime", True)
x = relay.var("x", shape=(8,), dtype="float32")
y = relay.var("y", shape=(8,), dtype="float32")
call = relay.Call(func, [x, y])
func = IRModule.from_expr(call)
check_result(func, inputs, (8,), x_data * y_data)
if __name__ == "__main__":
tvm.testing.main()
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import testing
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay import create_executor
from tvm.relay.prelude import Prelude, StaticTensorArrayOps
from tvm.relay.testing import count as count_, make_nat_value, make_nat_expr
import numpy as np
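# Recursively flattens a runtime result (NDArray, VM ADT, or interpreter
# ConstructorValue) into a flat Python list so it can be compared against a
# NumPy reference with assert_allclose. Empty tensor ADTs map to [0].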
def vmobj_to_list(mod, o, dtype="float32"):
_, tensor_nil, _, _, _, _, _, _, _ = mod.get_type(f"tensor_{dtype}_t")
if isinstance(o, tvm.nd.NDArray):
return [o.numpy().tolist()]
elif isinstance(o, tvm.runtime.container.ADT):
if len(o) == 0:
if tensor_nil.tag == o.tag:
return [0]
return []
result = []
for f in o:
result.extend(vmobj_to_list(mod, f, dtype))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(mod, o.fields[1], dtype)
hd = vmobj_to_list(mod, o.fields[0], dtype)
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].numpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def check_tensor_array(ta_mod, ref_res, *args, dtype="float32", rtol=1e-5):
for kind in ["debug", "vm"]:
for target, dev in [("llvm", tvm.cpu(0))]:
if kind == "debug" and dev.device_type != tvm.cpu().device_type:
continue
result = relay.create_executor(kind, mod=ta_mod, device=dev, target=target).evaluate()(
*args
)
got = vmobj_to_list(ta_mod, result, dtype)
tvm.testing.assert_allclose(ref_res, got, rtol=rtol, atol=rtol)
@tvm.testing.uses_gpu
def test_tensor_expand_dims():
def run(dtype):
x = relay.var("x")
mod = tvm.IRModule()
p = Prelude(mod)
expand_dims_func = p.get_global_var("tensor_expand_dims", dtype)
tensor1 = p.get_tensor_ctor("tensor1", dtype)
mod["main"] = relay.Function([x], expand |
_dims_func(tensor1(x)))
x_np = np.random.uniform(low=0.0, high=8.0, size=(1,)).astype(dtype)
expected = [np.expand_dims(x_np, axis=0)]
check_tensor_array(mod, expected, x_np)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_constructor():
def run(dtype):
x = relay.var("x")
mod = tvm.IRModule()
p = Prelude(mod)
tensor_array = p.get_global_var("tensor_array", dtype)
mod["main"] = relay.Function([x], tensor_array(x))
expected = np.array([0, 0, 0, 0, 0])
check_tensor_array(mod, expected, 5, dtype=dtype)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_read():
def run(dtype):
mod = tvm.IRModule()
p = Prelude(mod)
l = relay.var("l")
i = relay.var("i")
read_func = p.get_global_var("tensor_array_read", dtype)
tensor_array = p.get_global_var("tensor_array", dtype)
mod["main"] = relay.Function([l, i], read_func(tensor_array(l), i))
expected = [0]
check_tensor_array(mod, expected, *(1, 0), dtype=dtype)
check_tensor_array(mod, expected, *(5, 1), dtype=dtype)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_write():
def run(dtype):
mod = tvm.IRModule()
p = Prelude(mod)
tensor_t = p.get_type("tensor_t", dtype)
v1 = relay.var("v1")
v2 = relay.var("v2")
tensor_array = p.get_global_var("tensor_array", dtype)
init_tensor_array = tensor_array(relay.const(2))
write_func = p.get_global_var("tensor_array_write", dtype)
tensor1 = p.get_tensor_ctor("tensor1", dtype)
tensor_array1 = write_func(init_tensor_array, relay.const(0), tensor1(v1))
tensor_array2 = write_func(tensor_array1, relay.const(1), tensor1(v2))
mod["main"] = relay.Function([v1, v2], tensor_array2)
expected = [3, 7]
check_tensor_array(mod, expected, *(3, 7), dtype=dtype)
run("float32") |
run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_stack():
def run(dtype):
mod = tvm.IRModule()
p = Prelude(mod)
tensor_t = p.get_type("tensor_t", dtype)
rlist = p.mod.get_global_type_var("List")
tensor_array = p.get_global_var("tensor_array", dtype)
tensor1 = p.get_tensor_ctor("tensor1", dtype)
write = p.get_global_var("tensor_array_write", dtype)
stack = p.get_global_var("tensor_array_stack", dtype)
v = relay.var("v", shape=(1,), dtype=dtype)
init_tensor_array = tensor_array(relay.const(3))
tensor_array1 = write(init_tensor_array, relay.const(0), tensor1(v))
tensor_array2 = write(tensor_array1, relay.const(1), tensor1(v))
tensor_array3 = write(tensor_array2, relay.const(2), tensor1(v))
tensor_array4 = stack(tensor_array3)
mod["main"] = relay.Function([v], tensor_array4, tensor_t())
t = np.random.uniform(low=0.0, high=8.0, size=(1,)).astype(dtype)
expected = [np.stack([t, t, t])]
check_tensor_array(mod, expected, t, dtype=dtype)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_unstack():
def run(dtype):
mod = tvm.IRModule()
p = Prelude(mod)
unstack_tensor1 = p.get_global_var("tensor_array_unstack_tensor1", dtype)
v = relay.var("v")
mod["main"] = relay.Function([v], unstack_tensor1(v))
t = np.random.uniform(low=0.0, high=8.0, size=(1,)).astype(dtype)
check_tensor_array(mod, t, t, dtype=dtype)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_tensor_take():
def run(dtype):
mod = tvm.IRModule()
p = Prelude(mod)
take = p.get_global_var("tensor_take", dtype)
tensor2 = p.get_tensor_ctor("tensor2", dtype)
v = relay.var("v")
lower = relay.var("lower")
upper = relay.var("upper")
mod["main"] = relay.Function([v, lower, upper], take(tensor2(v), lower, upper))
v_data = np.random.uniform(low=0.0, high=8.0, size=(10, 10)).astype(dtype)
expected = [np.take(v_data, range(2, 5), axis=0)]
check_tensor_array(mod, expected, *(v_data, 2, 5), dtype=dtype)
expected = [np.take(v_data, range(0, 9), axis=0)]
check_tensor_array(mod, expected, *(v_data, 0, 9), dtype=dtype)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_tensor_concatenate():
def run(dtype):
mod = tvm.IRModule()
p = Prelude(mod)
concat = p.get_global_var("tensor_concatenate", dtype)
tensor1 = p.get_tensor_ctor("tensor1", dtype)
v1 = relay.var("v1", shape=(tvm.tir.Any(),), dtype=dtype)
v2 = relay.var("v2", shape=(tvm.tir.Any(),), dtype=dtype)
mod["main"] = relay.Function([v1, v2], concat(tensor1(v1), tensor1(v2)))
v1_data = np.random.uniform(low=0.0, high=8.0, size=(5,)).astype(dtype)
v2_data = np.random.uniform(low=0.0, high=8.0, size=(5,)).astype(dtype)
expected = [np.concatenate((v1_data, v2_data))]
check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_concat():
def run(dtype):
mod = tvm.IRModule()
p = Prelude(mod)
v1 = relay.var("v1")
v2 = relay.var("v2")
tensor_array = p.get_global_var("tensor_array", dtype)
tensor_array1 = tensor_array(relay.const(2))
write_func = p.get_global_var("tensor_array_write", dtype)
concat_func = p.get_global_var("tensor_array_concat", dtype)
tensor1 = p.get_tensor_ctor("tensor2", dtype)
tensor_array1 = write_func(tensor_array1, relay.const(0), tensor1(v1))
tensor_array1 = write_func(tensor_array1, relay.const(1), tensor1(v2))
tensor_array_concat = concat_func(tensor_array1)
mod["main"] = relay.Function([v1, v2], tensor_array_concat)
v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
v2_data = np.random.uniform(low=0.0, high=8.0, size=(1, 3)).astype(dtype)
expected = [np.concatenate((v1_data, v2_data), axis=0)]
check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_scatter():
def run(dtype):
mod = tvm.IRModule()
p = Prelude(mod)
v1 = relay.var("v1")
v2 = relay.var("v2")
v3 = relay.var("v2")
tensor_array = p.get_global_var("tensor_array", dtype)
tensor_array1 = tensor_array(relay.const(3))
write_func = p.get_global_var("tensor_array_write", dtype)
scatter_func = p.get_global_var("tensor_array_scatter", dtype)
tensor2 = p.get_tensor_ctor("tensor2", dtype)
tensor_array1 = write_func(tensor_array1, relay.const(0), tensor2(v1))
tensor_array1 = write_func(tensor_array1, relay.const(1), tensor2(v2))
tensor_array1 = write_func(tensor_array1, relay.const(2), tensor2(v3))
index = relay.var("index")
value_0 = relay.var("value_0")
value_1 = relay.var("value_1")
values_array = tensor_array(relay.const(2))
values_array = write_func(values_array, relay.const(0), tensor2(value_0))
values_array = write_func(values_array, relay.const(1), tensor2(value_1))
tensor_array_scatter = scatter_func(tensor_array1, index, values_array)
mod["main"] = relay.Function([v1, v2, v3, index, value_0, value_1], tensor_array_scatter)
v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
v2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
v3_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
index_data = np.array([0, 1], dtype="int32")
val1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
val2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
expected = [val1_data, val2_data, v3_data]
check_tensor_array(
mod,
expected,
*(v1_data, v2_data, v3_data, index_data, val1_data, val2_data),
dtype=dtype,
)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_split():
def run(dtype):
mod = tvm.IRModule()
p = Prelude(mod)
v1 = relay.var("v1")
v2 = relay.var("v2")
v3 = relay.var("v2")
tensor_array = p.get_global_var("tensor_array", dtype)
tensor_array1 = tensor_array(relay.const(3))
write_func = p.get_global_var("tensor_array_write", dtype)
split_func = p.get_global_var("tensor_array_split", dtype)
tensor2 = p.get_tensor_ctor("tensor2", dtype)
tensor_array1 = write_func(tensor_array1, relay.const(0), tensor2(v1))
tensor_array1 = write_func(tensor_array1, relay.const(1), tensor2(v2))
tensor_array1 = write_func(tensor_array1, relay.const(2), tensor2(v3))
value = relay.var("value")
ta_len = relay.var("length")
tensor_array_split = split_func(tensor_array1, tensor2(value), ta_len)
mod["main"] = relay.Function([v1, v2, v3, value, ta_len], tensor_array_split)
v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
v2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
v3_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
value_data = np.random.uniform(low=0.0, high=8.0, size=(4, 3)).astype(dtype)
length_data = np.array([2, 2], dtype="int32")
expected = np.concatenate([value_data, v3_data])
expected = np.split(expected, indices_or_sections=[2, 4])
check_tensor_array(
mod, expected, *(v1_data, v2_data, v3_data, value_data, length_data), dtype=dtype
)
run("float32")
run("int32")
@tvm.testing.uses_gpu
def test_static_tensor_take():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
take = p.get_global_var_static("tensor_take", dtype, shape)
tensor_constructor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
v = relay.var("v")
lower = relay.var("lower")
upper = relay.var("upper")
mod["main"] = relay.Function([v, lower, upper], take(tensor_constructor(v), lower, upper))
v_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
expected = [np.take(v_data, range(2, 5), axis=0)]
check_tensor_array(mod, expected, *(v_data, 2, 5), dtype=dtype)
expected = [np.take(v_data, range(0, 9), axis=0)]
check_tensor_array(mod, expected, *(v_data, 0, 9), dtype=dtype)
run("float32", [10, 10])
run("int32", [15, 11])
@tvm.testing.uses_gpu
def test_static_tensor_concatenate():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
concat = p.get_global_var_static("tensor_concatenate", dtype, shape)
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
v1 = relay.var("v1")
v2 = relay.var("v2")
mod["main"] = relay.Function([v1, v2], concat(tensor(v1), tensor(v2)))
v1_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
v2_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
expected = [np.concatenate((v1_data, v2_data))]
check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)
run(
"float32",
[
5,
],
)
run("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_expand_dims():
def run(dtype, shape):
x = relay.var("x")
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
expand_dims_func = p.get_global_var_static("tensor_expand_dims", dtype, shape)
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
mod["main"] = relay.Function([x], expand_dims_func(tensor(x)))
x_np = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
expected = [np.expand_dims(x_np, axis=0)]
check_tensor_array(mod, expected, x_np)
run("float32", [])
run(
"int32",
[
2,
],
)
@tvm.testing.uses_gpu
def test_static_tensor_array_constructor():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
tensor_constructor = p.get_name_static("tensor_constructor", dtype, shape)
assert tensor_constructor is not None
run("float32", [1, 1])
@tvm.testing.uses_gpu
def test_static_tensor_array_read():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
np_data_list = []
ta_length = 3
for _ in range(ta_length):
np_data_list.append(np.random.uniform(0, 10, size=shape).astype(dtype))
v0 = relay.var("v0")
v1 = relay.var("v1")
v2 = relay.var("v2")
n = relay.var("n")
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
init_tensor_array = tensor_array(relay.const(ta_length))
read_func = p.get_global_var_static("tensor_array_read", dtype, shape)
write_func = p.get_global_var_static("tensor_array_write", dtype, shape)
tensor_array0 = write_func(init_tensor_array, relay.const(0), tensor(v0))
tensor_array1 = write_func(tensor_array0, relay.const(1), tensor(v1))
tensor_array2 = write_func(tensor_array1, relay.const(2), tensor(v2))
mod["main"] = relay.Function([v0, v1, v2, n], read_func(tensor_array2, n))
expected = [np_data_list[0]]
check_tensor_array(mod, expected, *list(np_data_list + [0]), dtype=dtype)
expected = [np_data_list[1]]
check_tensor_array(mod, expected, *list(np_data_list + [1]), dtype=dtype)
expected = [np_data_list[2]]
check_tensor_array(mod, expected, *list(np_data_list + [2]), dtype=dtype)
run("float32", [])
run("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_write():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
ta_length = 2
np_data_list = [
np.random.uniform(0, 10, size=shape).astype(dtype) for _ in range(ta_length)
]
v0 = relay.var("v0")
v1 = relay.var("v1")
tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
init_tensor_array = tensor_array(relay.const(ta_length))
write_func = p.get_global_var_static("tensor_array_write", dtype, shape)
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
tensor_array0 = write_func(init_tensor_array, relay.const(0), tensor(v0))
tensor_array1 = write_func(tensor_array0, relay.const(1), tensor(v1))
mod["main"] = relay.Function([v0, v1], tensor_array1)
expected = np_data_list
check_tensor_array(mod, expected, *np_data_list, dtype=dtype)
run("float32", [])
run("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_unstack():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
unstack_tensor = p.get_global_var_static("tensor_array_unstack", dtype, shape)
v = relay.var("v")
mod["main"] = relay.Function([v], unstack_tensor(v))
t = np.random.uniform(low=0, high=10, size=shape).astype(dtype)
(*expected,) = t
check_tensor_array(mod, expected, t, dtype=dtype)
run("float32", [4])
run("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_scatter():
def run(dtype, shape, indices_shape=None):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
if indices_shape is not None:
static_tensor_array_ops.define_tensor_array_scatter(indices_shape, True)
v1 = relay.var("v1")
v2 = relay.var("v2")
v3 = relay.var("v2")
tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
tensor_array0 = tensor_array(relay.const(3))
write_func = p.get_global_var_static("tensor_array_write", dtype, shape)
scatter_func = p.get_global_var_static("tensor_array_scatter", dtype, shape)
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
tensor_array1 = write_func(tensor_array0, relay.const(0), tensor(v1))
tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))
tensor_array1 = write_func(tensor_array1, relay.const(2), tensor(v3))
index = relay.var("index")
value_0 = relay.var("value_0")
value_1 = relay.var("value_1")
values_array = tensor_array(relay.const(2))
values_array = write_func(values_array, relay.const(0), tensor(value_0))
values_array = write_func(values_array, relay.const(1), tensor(value_1))
tensor_array_scatter = scatter_func(tensor_array1, index, values_array)
mod["main"] = relay.Function([v1, v2, v3, index, value_0, value_1], tensor_array_scatter)
v1_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
v2_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
v3_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
index_data = np.array([0, 1], dtype="int32")
val1_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
val2_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
expected = [val1_data, val2_data, v3_data]
check_tensor_array(
mod,
expected,
*(v1_data, v2_data, v3_data, index_data, val1_data, val2_data),
dtype=dtype,
)
run("float32", [2, 3])
run("int32", [2, 3])
run(
"float32",
[2, 3],
[
2,
],
)
@tvm.testing.uses_gpu
def test_static_tensor_array_split():
def run(dtype, shape, value_shape=None, lengths_shape=None):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
if value_shape is not None or lengths_shape is not None:
static_tensor_array_ops.define_tensor_array_split(value_shape, lengths_shape, False)
v1 = relay.var("v1")
v2 = relay.var("v2")
v3 = relay.var("v2")
adt_shape = [
relay.Any(),
] + shape[1:]
test_ops = StaticTensorArrayOps(p, dtype, adt_shape)
test_ops.register()
tensor_array = test_ops.get_global_var("tensor_array")
tensor_array1 = tensor_array(relay.const(3))
write_func = test_ops.get_global_var("tensor_array_write")
split_ops = StaticTensorArrayOps(p, dtype, shape)
split_ops.register()
split_func = split_ops.get_global_var("tensor_array_split")
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, test_ops.shape)
tensor_array1 = write_func(tensor_array1, relay.const(0), tensor(v1))
tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))
tensor_array1 = write_func(tensor_array1, relay.const(2), tensor(v3))
value = relay.var("value")
ta_len = relay.var("length")
if value_shape is None:
tensor1 = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
else:
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, value_shape)
static_tensor_array_ops.register()
tensor1 = p.get_tensor_ctor_static("tensor_constructor", dtype, test_ops.shape)
tensor_array_split = split_func(tensor_array1, tensor1(value), ta_len)
mod["main"] = relay.Function([v1, v2, v3, value, ta_len], tensor_array_split)
v1_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)
v2_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)
v3_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)
value_data = np.random.uniform(low=0.0, high=8.0, size=value_shape or shape).astype(dtype)
length_data = np.array([2, 2], dtype="int32")
expected = np.concatenate([value_data, v3_data])
expected = np.split(expected, indices_or_sections=[2, 4])
check_tensor_array(
mod, expected, *(v1_data, v2_data, v3_data, value_data, length_data), dtype=dtype
)
run("float32", [4, 3])
run("int32", [4, 3])
run(
"int32",
[relay.Any(), 3],
[4, 3],
[
2,
],
)
@tvm.testing.uses_gpu
def test_static_tensor_array_concat():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
v1 = relay.var("v1")
v2 = relay.var("v2")
tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
tensor_array1 = tensor_array(relay.const(2))
write_func = p.get_global_var_static("tensor_ar |
ray_write", dtype, shape)
concat_func = p.get_global_var_static("tensor_array_concat", dtype, shape)
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
tensor_array1 = write_func(tensor_array1, relay.const(0), tensor(v1))
tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))
tensor_array_concat = concat_func(tensor_array1)
mod["main"] = relay.Function([v1, v2], tensor_array_concat)
v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
v2_data = np.random.uniform(low=0.0, high=8.0, size=(1, 3)).astype(dtype)
expected = [np.concatenate((v1_data, v2_data), axis=0)]
check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)
run("float32", [relay.Any(), 3])
run("int32", [relay.Any(), 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_gather():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
write = p.get_global_var_static("tensor_array_write", dtype, shape)
gather = p.get_global_var_static("tensor_array_gather", dtype, shape)
v = relay.var("v")
indice = relay.var("indice")
init_tensor_array = tensor_array(relay.const(3))
tensor_array1 = write(init_tensor_array, relay.const(0), tensor(v))
tensor_array2 = write(tensor_array1, relay.const(1), tensor(v))
tensor_array3 = write(tensor_array2, relay.const(2), tensor(v))
out = gather(tensor_array3, indice)
mod["main"] = relay.Function([v, indice], out)
t = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
indice_data = np.array([0, 2], dtype="int32")
expected = [np.stack([t, t])]
check_tensor_array(mod, expected, *(t, indice_data), dtype=dtype)
run("float32", [])
run("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_stack():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
write = p.get_global_var_static("tensor_array_write", dtype, shape)
stack = p.get_global_var_static("tensor_array_stack", dtype, shape)
v = relay.var("v")
init_tensor_array = tensor_array(relay.const(3))
tensor_array1 = write(init_tensor_array, relay.const(0), tensor(v))
tensor_array2 = write(tensor_array1, relay.const(1), tensor(v))
tensor_array3 = write(tensor_array2, relay.const(2), tensor(v))
tensor_array4 = stack(tensor_array3)
mod["main"] = relay.Function([v], tensor_array4)
t = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
expected = [np.stack([t, t, t])]
check_tensor_array(mod, expected, t, dtype=dtype)
run("float32", [])
run("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_get_data():
def run(dtype, shape):
mod = tvm.IRModule()
p = Prelude(mod)
static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
static_tensor_array_ops.register()
np_data_list = []
ta_length = 3
for _ in range(ta_length):
np_data_list.append(np.random.uniform(0, 10, size=shape).astype(dtype))
v0 = relay.var("v0")
v1 = relay.var("v1")
v2 = relay.var("v2")
n = relay.var("n")
tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
init_tensor_array = tensor_array(relay.const(ta_length))
read_func = p.get_global_var_static("tensor_array_read", dtype, shape)
write_func = p.get_global_var_static("tensor_array_write", dtype, shape)
get_data_func = p.get_global_var_static("tensor_get_data", dtype, shape)
tensor_array0 = write_func(init_tensor_array, relay.const(0), tensor(v0))
tensor_array1 = write_func(tensor_array0, relay.const(1), tensor(v1))
tensor_array2 = write_func(tensor_array1, relay.const(2), tensor(v2))
mod["main"] = relay.Function([v0, v1, v2, n], get_data_func(read_func(tensor_array2, n)))
expected = [np_data_list[0]]
check_tensor_array(mod, expected, *list(np_data_list + [0]), dtype=dtype)
expected = [np_data_list[1]]
check_tensor_array(mod, expected, *list(np_data_list + [1]), dtype=dtype)
expected = [np_data_list[2]]
check_tensor_array(mod, expected, *list(np_data_list + [2]), dtype=dtype)
run("float32", [])
run("int32", [2, 3])
if __name__ == "__main__":
pytest.main([__file__])
"""Unit tests for testing ToMixedPrecision pass"""
from typing |
import Any, Dict, List |
import numpy as np |
import pytest |
import tvm
from tvm |
import relay
from tvm.relay.testing |
import lstm
from tvm.relay.transform |
import InferType, ToMixedPrecision, mixed_precision
target_precision = tvm.testing.parameter(
pytest.param("float16"),
pytest.param("bfloat16"),
ids=["float16", "bfloat16"],
)
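# tvm.testing.parameter generates a pytest fixture: every test that accepts
# `target_precision` runs once for float16 and once for bfloat16. bfloat16
# outputs are executed but not compared numerically
# (see verify_mixed_precision_output_close).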
def run_module(mod: tvm.IRModule, mod_params: Dict[str, Any]) -> List:
dev = tvm.device("llvm", 0)
result = relay.create_executor("debug", mod, device=dev, target="llvm").evaluate()(**mod_params)
if isinstance(result, tvm.runtime.container.ADT):
result = [r.numpy() for r in result]
return result
else:
return [result.numpy()]
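# Runs the fp32 module as a reference, applies ToMixedPrecision, runs the
# converted module, and (for non-bfloat16 dtypes) checks the two outputs agree
# within rtol/atol. Returns the converted module for structural comparisons.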
def verify_mixed_precision_output_close(
    mod: tvm.IRModule,
    mod_params: Dict[str, Any],
    mixed_precision_dtype="float16",
    rtol: float = 1e-3,
    atol: float = 0,
    keep_orig_output_dtype=False,
) -> tvm.IRModule:
mod = InferType()(mod)
result_fp32 = run_module(mod, mod_params)
if not keep_orig_output_dtype:
amp_mod = ToMixedPrecision(mixed_precision_dtype)(mod)
result_amp = run_module(amp_mod, mod_params)
else:
with tvm.transform.PassContext(
config={"relay.ToMixedPrecision.keep_orig_output_dtype": True}
):
amp_mod = ToMixedPrecision(mixed_precision_dtype)(mod)
result_amp = run_module(amp_mod, mod_params)
if mixed_precision_dtype != "bfloat16":
for fp32, amp in zip(result_fp32, result_amp):
np.testing.assert_allclose(fp32, amp, rtol=rtol, atol=atol)
if keep_orig_output_dtype:
assert (
np.array(result_amp).dtype == np.array(result_fp32).dtype
), "output type and original type mismatch"
return amp_mod
def test_lstm(target_precision):
"""A small stress test on a single unrolled lstm unit.
Has internal functions and let statements the pass must work on.
"""
units = 4
iterations = 5
mod, mod_params = lstm.get_workload(iterations=iterations, num_hidden=units)
for i in range(iterations):
mod_params["data" if i == 0 else f"dat |
a{i}"] = np.random.uniform(
-10, 10, (1, units)
).astype("float32")
verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype=target_precision, rtol=0.01, atol=0.01
)
def test_lstm_float64():
"""Tests if can handle other mixed precision types.
As a toy example show can convert graph to float64 and have it run.
It doesn't really make sense to do it, this just shows we can change
the target mixed_precision_dtype.
"""
units = 3
iterations = 5
mod, mod_params = lstm.get_workload(iterations=iterations, num_hidden=units)
for i in range(iterations):
mod_params["data" if i == 0 else f"data{i}"] = np.random.uniform(
-10, 10, (1, units)
).astype("float32")
verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype="float64", rtol=0.01, atol=0.01
)
def test_convert_single_conv(target_precision):
"""Conv is a green listed operation meaning it will always use fp16 workload.
By default it accumulates to fp32 and outputs fp16.
"""
data_shape = (1, 3, 32, 32)
weight_shape = (5, 3, 3, 3)
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
mod = tvm.IRModule.from_expr(conv)
mod = tvm.relay.transform.InferType()(mod)
mod_params = {
"data": np.random.uniform(-1, 1, size=data_shape).astype("float32"),
"weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
}
amp_mod = verify_mixed_precision_output_close(
mod,
mod_params,
mixed_precision_dtype=target_precision,
atol=0.01,
rtol=1e-3,
keep_orig_output_dtype=True,
)
expected_mod = tvm.IRModule.from_expr(
relay.cast(
relay.nn.conv2d(
relay.cast(data, target_precision),
relay.cast(weight, target_precision),
strides=(1, 1),
padding=(1, 1),
out_dtype=target_precision,
),
"float32",
)
)
expected_mod = tvm.relay.transform.InferType()(expected_mod)
assert not tvm.ir.structural_equal(amp_mod, mod)
assert tvm.ir.structural_equal(amp_mod, expected_mod)
def test_convert_single_conv_fp64():
"""As above but checks choosing a mixed_precision_type other than FP16 works"""
data_shape = (1, 3, 32, 32)
weight_shape = (5, 3, 3, 3)
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
mod = tvm.IRModule.from_expr(conv)
mod = tvm.relay.transform.InferType()(mod)
mod_params = {
"data": np.random.uniform(-1, 1, size=data_shape).astype("float32"),
"weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
}
amp_mod = verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype="float64", atol=0.01, rtol=1e-3
)
expected_mod = tvm.IRModule.from_expr(
relay.nn.conv2d(
relay.cast(data, "float64"),
relay.cast(weight, "float64"),
strides=(1, 1),
padding=(1, 1),
out_dtype="float64",
),
)
expected_mod = tvm.relay.transform.InferType()(expected_mod)
assert not tvm.ir.structural_equal(amp_mod, mod)
assert tvm.ir.structural_equal(amp_mod, expected_mod)
def test_convert_conv_bn(target_precision):
"""Conv is green and batch norm is gray. As Conv should output fp16 batch_norm should be green."""
data_shape = (1, 3, 32, 32)
weight_shape = (5, 3, 3, 3)
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
bn_shape = [5]
gamma = relay.var("gamma", shape=bn_shape)
beta = relay.var("beta", shape=bn_shape)
moving_mean = relay.var("moving_mean", shape=bn_shape)
moving_var = relay.var("moving_var", shape=bn_shape)
bn = relay.nn.batch_norm(conv, gamma, beta, moving_mean, moving_var)
mod = tvm.IRModule.from_expr(bn[0])
mod = tvm.relay.transform.InferType()(mod)
mod_params = {
"data": np.random.uniform(-1, 1, size=data_shape).astype("float32"),
"weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
"gamma": np.random.uniform(-1, 1, size=bn_shape).astype("float32"),
"beta": np.random.uniform(-1, 1, size=bn_shape).astype("float32"),
"moving_mean": np.random.uniform(-1, 1, size=bn_shape).astype("float32"),
"moving_var": np.random.uniform(-1, 1, size=bn_shape).astype("float32"),
}
amp_mod = verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype=target_precision, atol=0.025, rtol=0.01
)
data = relay.cast(relay.var("data", shape=data_shape), target_precision)
weight = relay.cast(relay.var("weight", shape=weight_shape), target_precision)
conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype=target_precision)
bn_shape = [5]
gamma = relay.cast(relay.var("gamma", shape=bn_shape), target_precision)
beta = relay.cast(relay.var("beta", shape=bn_shape), target_precision)
moving_mean = relay.cast(relay.var("moving_mean", shape=bn_shape), target_precision)
moving_var = relay.cast(relay.var("moving_var", shape=bn_shape), target_precision)
bn = relay.nn.batch_norm(conv, gamma, beta, moving_mean, moving_var)
expected_mod = tvm.IRModule.from_expr(bn[0])
expected_mod = tvm.relay.transform.InferType()(expected_mod)
assert not tvm.ir.structural_equal(amp_mod, mod)
assert tvm.ir.structural_equal(amp_mod, expected_mod)
def test_do_not_convert_softmax(target_precision):
"""Softma |
x is a red listed operation and therefore should never be fp16."""
shape = [1, 2, 3]
a = relay.var("a", shape=shape)
b = relay.nn.softmax(a)
mod = tvm.IRModule.from_expr(b)
mod = tvm.relay.transform.InferType()(mod)
out_mod = ToMixedPrecision(target_precision)(mod)
orig_mod = tvm.relay.transform.InferType()(mod)
assert tvm.ir.structural_equal(orig_mod, out_mod)
def test_do_not_convert_arange(target_precision):
"""Arange is a red listed operation and therefore should never be fp16."""
dtype = "float32"
arange = relay.arange(relay.const(1, dtype), relay.const(128, dtype))
mod = tvm.IRModule.from_expr(arange)
out_mod = ToMixedPrecision(target_precision)(mod)
orig_mod = tvm.relay.transform.InferType()(mod)
assert tvm.ir.structural_equal(orig_mod, out_mod)
def test_do_not_convert_summation(target_precision):
"""Ops that could involve a large summation are not allowed in fp16."""
shape = [1, 3, 16, 16]
a = relay.var("a", shape=shape)
ops = [
relay.sum,
relay.mean,
relay.nn.global_avg_pool2d,
lambda inp: relay.nn.adaptive_avg_pool2d(inp, (1, 1)),
]
for op in ops:
mod = tvm.IRModule.from_expr(op(a))
out_mod = ToMixedPrecision(target_precision)(mod)
orig_mod = tvm.relay.transform.InferType()(mod)
assert tvm.ir.structural_equal(orig_mod, out_mod)
def test_green_gray_propagates_simple(target_precision):
"""Conv is a green listed operation, while addition is gray.
As Conv outputs fp16 the add should be done in fp16.
"""
data_shape = (1, 3, 32, 32)
weight_shape = (5, 3, 3, 3)
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
conv = conv + conv
mod = tvm.IRModule.from_expr(conv)
mod = tvm.relay.transform.InferType()(mod)
mod_params = {
"data": np.random.un |
iform(-1, 1, size=data_shape).astype("float32"),
"weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
}
amp_mod = verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01
)
conv_expr = relay.nn.conv2d(
relay.cast(data, target_precision),
relay.cast(weight, target_precision),
strides=(1, 1),
padding=(1, 1),
out_dtype=target_precision,
)
expected_mod = tvm.IRModule.from_expr(conv_expr + conv_expr)
expected_mod = tvm.relay.transform.InferType()(expected_mod)
assert not tvm.ir.structural_equal(amp_mod, mod)
assert tvm.ir.structural_equal(amp_mod, expected_mod)
def test_green_red_not_use_extraneous_cast(target_precision):
"""Conv. is a green listed operation, while softmax is red.
Conv. also by default accumulates to fp32 but outputs fp16.
We want to avoid a situation where we have extraneous casts.
E.g. because softmax wants to operate on FP32 we might have
conv (FP32) -> cast (FP16) -> cast (FP32) -> softmax (FP32)
To get around this internally when we cast in the pass we cache
the output nodes and the reverse of the cast back to the original
node. For example casting the `conv (FP32)` to FP16 would produce:
`conv (FP32) -> cast (FP16)`
As the outputs. Now anytime we try to cast the `conv (FP32)` node
to FP16 it would return the cached result instead of a new cast node:
`conv (FP32) -> cast (FP16)`
Furthermore, if we try to cast the `cast (FP16)` node back to FP32 it
would just return
`conv (FP32)`.
This test makes sure this behavior occurs.
"""
data_shape = (1, 3, 32, 32)
weight_shape = (5, 3, 3, 3)
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
result = relay.nn.softmax(conv)
mod = tvm.IRModule.from_expr(result)
mod_params = {
"data": np.random.uniform(-1, 1, size=data_shape).astype("float32"),
"weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
}
amp_mod = verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=1e-3
)
conv = relay.cast(
relay.nn.conv2d(
relay.cast(data, target_precision),
relay.cast(weight, target_precision),
strides=(1, 1),
padding=(1, 1),
out_dtype=target_precision,
),
"float32",
)
result = relay.nn.softmax(conv)
expected_mod = tvm.IRModule.from_expr(result)
expected_mod = InferType()(expected_mod)
assert tvm.ir.structural_equal(expected_mod, amp_mod)
def test_red_gray_propagates_simple(target_precision):
"""Everything after a softmax should be in FP32 (exception green colored ops)"""
shape = [1, 2, 3]
a = relay.var("a", shape=shape)
b = relay.nn.softmax(a)
c = b + b
mod = tvm.IRModule.from_expr(c)
mod = tvm.relay.transform.InferType()(mod)
mod_params = {
"a": np.random.uniform(-1, 1, size=shape).astype("float32"),
}
output_mod = verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype=target_precision, atol=0.0, rtol=0.0
)
assert tvm.ir.structural_equal(mod, output_mod)
def test_let_statement_simple(target_precision):
"""A 'simple' let statement example.
Noticeable is the mutation of the bound variable types.
"""
var1 = relay.var("var1", shape=[1, 20])
var2 = relay.var("var2", shape=[1, 20])
data = relay.var("data", shape=[1, 20])
weight = relay.var("weight", shape=[20, 20])
r1 = var1 + var1
r2 = var2 + var2
let2 = relay.Let(var2, relay.nn.dense(r1, weight, units=20), r2)
let1 = relay.Let(var1, relay.nn.dense(data, weight, units=20), let2)
mod = tvm.IRModule.from_expr(let1)
mod_params = {
"data": np.random.uniform(-1, 1, size=[1, 20]).astype("float32"),
"weight": np.random.uniform(-1, 1, size=[20, 20]).astype("float32"),
}
output_mod = verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype=target_precision, atol=0.05, rtol=0.15
)
var1 = relay.var("var1", shape=[1, 20], dtype=target_precision)
var2 = relay.var("var2", shape=[1, 20], dtype=target_precision)
data = relay.cast(relay.var("data", shape=[1, 20]), target_precision)
weight = relay.cast(relay.var("weight", shape=[20, 20]), target_precision)
r1 = var1 + var1
r2 = var2 + var2
let2 = relay.Let(
var2,
relay.nn.dense(r1, weight, units=20, out_dtype=target_precision),
r2,
)
let1 = relay.Let(
var1,
relay.nn.dense(data, weight, units=20, out_dtype=target_precision),
let2,
)
expected_mod = tvm.IRModule.from_expr(let1)
expected_mod = InferType()(expected_mod)
assert tvm.ir.structural_equal(expected_mod, output_mod)
def test_where_simple(target_precision):
data = relay.var("data", shape=[1, 20])
weight = relay.var("weight", shape=[20, 20])
a = relay.nn.dense(data, weight, units=20)
b = relay.where(data, a, a)
mod = tvm.IRModule.from_expr(b)
mod_params = {
"data": np.random.uniform(-1, 1, size=[1, 20]).astype("float32"),
"weight": np.random.uniform(-1, 1, size=[20, 20]).astype("float32"),
}
output_mod = verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01
)
data = relay.cast(relay.var("data", shape=[1, 20]), target_precision)
weight = relay.cast(relay.var("weight", shape=[20, 20]), target_precision)
a = relay.nn.dense(data, weight, units=20, out_dtype=target_precision)
b = relay.where(data, a, a)
expected_mod = tvm.IRModule.from_expr(b)
expected_mod = InferType()(expected_mod)
assert tvm.ir.structural_equal(expected_mod, output_mod)
def test_batch_matmul_simple(target_precision):
"""Batch matmul is a special case where we try to accumulate to fp16.
This is because heterogeneous accumulation dtypes do not work on all
platforms at the moment.
"""
data = relay.var("data", shape=[1, 1, 20])
weight = relay.var("weight", shape=[1, 20, 20])
a = relay.nn.batch_matmul(data, weight)
mod = tvm.IRModule.from_expr(a)
mod_params = {
"data": np.random.uniform(-1, 1, size=[1, 1, 20]).astype("float32"),
"weight": np.random.uniform(-1, 1, size=[1, 20, 20]).astype("float32"),
}
output_mod = verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01
)
data = relay.cast(relay.var("data", shape=[1, 1, 20]), target_precision)
weight = relay.cast(relay.var("weight", shape=[1, 20, 20]), target_precision)
a = relay.nn.batch_matmul(data, weight, out_dtype=target_precision)
expected_mod = tvm.IRModule.from_expr(a)
expected_mod = InferType()(expected_mod)
assert tvm.ir.structural_equal(expected_mod, output_mod)
def test_convert_follow_node_with_integer_arguments(target_precision):
"""Tests the conversion of a follow op with integer arguments + constant float args.
The follow op should convert the floating point argument into fp16 as constants/vars
will always be converted if safe to do so.
"""
data = relay.var("data", shape=[1, 10], dtype="float32")
indices = relay.var("indices", shape=[1, 1], dtype="int32") + relay.const(0, dtype="int32")
take = relay.take(data, indices, axis=0)
mod = tvm.IRModule.from_expr(take)
mod_params = {
"data": np.random.uniform(-1, 1, size=[1, 10]).astype("float32"),
"indices": np.array([[0]]).astype("int32"),
}
output_mod = verify_mixed_precision_output_close(
mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01
)
data = relay.cast(relay.var("data", shape=[1, |
10]), target_precision)
take = relay.take(data, indices, axis=0)
expected_mod = tvm.IRModule.from_expr(take)
expected_mod = InferType()(expected_mod)
assert tvm.ir.structural_equal(expected_mod, output_mod)
if __name__ == "__main__":
pytest.main([__file__])
import tvm
from tvm import te
from tvm import relay
from tvm.relay import TypeFunctor, TypeMutator, TypeVisitor
from tvm.relay.ty import (
    TypeVar,
    IncompleteType,
    TensorType,
    FuncType,
    TupleType,
    TypeRelation,
    RefType,
    GlobalTypeVar,
    TypeCall,
)
from tvm.relay.adt import TypeData
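# check_visit exercises all three type functors on `typ`: the base TypeFunctor
# must raise NotImplementedError, TypeVisitor must traverse without error, and
# TypeMutator must rebuild a structurally equal type.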
def check_visit(typ):
try:
ef = TypeFunctor()
ef.visit(typ)
assert False
except NotImplementedError:
pass
ev = TypeVisitor()
ev.visit(typ)
tvm.ir.assert_structural_equal(TypeMutator().visit(typ), typ, map_free_vars=True)
def test_type_var():
tv = TypeVar("a")
check_visit(tv)
def test_incomplete_type():
it = IncompleteType()
check_visit(it)
def test_tensor_type():
tt = TensorType([])
check_visit(tt)
def test_func_type():
tv = TypeVar("tv")
tt = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
ft = FuncType([tt], tt, type_params=[tv])
check_visit(ft)
def test_tuple_type():
tt = TupleType([TupleType([])])
check_visit(tt)
def test_type_relation():
func = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
attrs = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
tp = TypeVar("tp")
tf = FuncType([], TupleType([]), [], [])
tt = TensorType([1, 2, 3], "float32")
tr = TypeRelation(func, [tp, tf, tt], 2, attrs)
check_visit(tr)
def test_ref_type():
rt = RefType(TupleType([]))
check_visit(rt)
def test_global_type_var():
gtv = GlobalTypeVar("gtv")
check_visit(gtv)
def test_type_call():
tc = TypeCall(GlobalTypeVar("tf"), [TupleType([])])
check_visit(tc)
def test_type_data():
td = TypeData(GlobalTypeVar("td"), [TypeVar("tv")], [])
check_visit(td)
if __name__ == "__main__":
test_type_var()
test_incomplete_type()
test_tensor_type()
test_func_type()
test_tuple_type()
test_type_relation()
test_ref_type()
test_global_type_var()
test_type_call()
test_type_data()
"""Test that type checker correcly computes types
for expressions.
""" |
import pytest |
import tvm
from tvm |
import IRModule, parser, relay, te
from tvm.relay |
import analysis, op, transform
from tvm.relay.op |
import op as _op |
import numpy as np
def infer_mod(mod, annotate_spans=True):
if annotate_spans:
mod = relay.transform.AnnotateSpans()(mod)
mod = transform.InferType()(mod)
return mod
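# InferTypeLocal performs type inference on a free-standing expression,
# populating checked_type in place, so the same expression object is returned.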
def infer_expr(expr):
transform.InferTypeLocal(expr)
return expr
def assert_has_type(expr, typ, mod=None):
if not mod:
mod = tvm.IRModule({})
mod["main"] = expr
mod = infer_mod(mod)
checked_expr = mod["main"]
checked_type = checked_expr.checked_type
if checked_type != typ:
raise RuntimeError("Type mismatch %s vs %s" % (checked_type, typ))
def initialize_box_adt(mod):
box = relay.GlobalTypeVar("box")
tv = relay.TypeVar("tv")
constructor = relay.Constructor("constructor", [tv], box)
data = relay.TypeData(box, [tv], [constructor])
mod[box] = data
return box, constructor
def test_monomorphic_let():
"Program: let %x = 1; %x"
sb = relay.ScopeBuilder()
x = relay.var("x", dtype="float64", shape=())
x = sb.let(x, relay.const(1.0, "float64"))
sb.ret(x)
xchecked = infer_expr(sb.get())
assert xchecked.checked_type == relay.scalar_type("float64")
def test_single_op():
"Program: fn (%x : float32) { let %t1 = f(%x); %t1 }"
x = relay.var("x", shape=[])
func = relay.Function([x], op.log(x))
ttype = relay.TensorType([], dtype="float32")
assert_has_type(func, relay.FuncType([ttype], ttype))
def test_add_broadcast_op():
"""
Program:
fn (%x: Tensor[(10, 4), float32], %y: Tensor[(5, 10, 1), float32])
-> Tensor[(5, 10, 4), float32] {
%x + %y
}
"""
x = relay.var("x", shape=(10, 4))
y = relay.var("y", shape=(5, 10, 1))
z = x + y
func = relay.Function([x, y], z)
t1 = relay.TensorType((10, 4), "float32")
t2 = relay.TensorType((5, 10, 1), "float32")
t3 = relay.TensorType((5, 10, 4), "float32")
expected_ty = relay.FuncType([t1, t2], t3)
assert_has_type(func, expected_ty)
def test_dual_op():
"""Program:
fn (%x : Tensor[(10, 10), float32]) {
let %t1 = log(x);
let %t2 = add(%t1, %x);
%t1
}
"""
tp = relay.TensorType((10, 10), "float32")
x = relay.var("x", tp)
sb = relay.ScopeBuilder()
t1 = sb.let("t1", relay.log(x))
t2 = sb.let("t2", relay.add(t1, x))
sb.ret(t2)
f = relay.Function([x], sb.get())
fchecked = infer_expr(f)
assert fchecked.checked_type == relay.FuncType([tp], tp)
def test_decl():
"""Program:
def @f(%x : Tensor[(10, 10), float32]) {
log(%x)
}
"""
tp = relay.TensorType((10, 10))
x = relay.var("x", tp)
f = relay.Function([x], relay.log(x))
fchecked = infer_expr(f)
assert fchecked.checked_type == relay.FuncType([tp], tp)
def test_recursion():
"""
Program:
def @f(%n: int32, %data: float32) -> float32 {
if (%n == 0) {
%data
} else {
@f(%n - 1, log(%data))
}
}
"""
sb = relay.ScopeBuilder()
f = relay.GlobalVar("f")
ti32 = relay.scalar_type("int32")
tf32 = relay.scalar_type("float32")
n = relay.var("n", ti32)
data = relay.var("data", tf32)
with sb.if_scope(relay.equal(n, relay.const(0, ti32))):
sb.ret(data)
with sb.else_scope():
sb.ret(f(relay.subtract(n, relay.const(1, ti32)), relay.log(data)))
mod = tvm.IRModule()
mod[f] = relay.Function([n, data], sb.get())
mod = infer_mod(mod)
assert "@f(%1, %2)" in mod.astext()
assert mod["f"].checked_type == relay.FuncType([ti32, tf32], tf32)
def test_incomplete_call():
tt = relay.scalar_type("int32")
x = relay.var("x", tt)
f_type = relay.FuncType([tt], tt)
f = relay.var("f")
func = relay.Function([x, f], relay.Call(f, [x]), tt)
ft = infer_expr(func)
assert ft.checked_type == relay.FuncType([tt, f_type], tt)
def test_higher_order_argument():
a = relay.TypeVar("a")
x = relay.Var("x", a)
id_func = relay.Function([x], x, a, [a])
b = relay.TypeVar("b")
f = relay.Var("f", relay.Func |
Type([b], b))
y = relay.Var("y", b)
ho_func = relay.Function([f, y], f(y), b, [b])
ho_call = ho_func(id_func, relay.const(0, "int32"))
hc = infer_expr(ho_call)
expected = relay.scalar_type("int32")
assert hc.checked_type == expected
def test_higher_order_return():
a = relay.TypeVar("a")
x = relay.Var("x", a)
id_func = relay.Function([x], x, a, [a])
b = relay.TypeVar("b")
nested_id = relay.Function([], id_func, relay.FuncType([b], b), [b])
ft = infer_expr(nested_id)
assert ft.checked_type == relay.FuncType([], relay.FuncType([b], b), [b])
def test_higher_order_nested():
a = relay.TypeVar("a")
x = relay.Var("x", a)
id_func = relay.Function([x], x, a, [a])
choice_t = relay.FuncType([], relay.scalar_type("bool"))
f = relay.Var("f", choice_t)
b = relay.TypeVar("b")
z = relay.Var("z")
top = relay.Function(
[f], relay.If(f(), id_func, relay.Function([z], z)), relay.FuncType([b], b), [b]
)
expected = relay.FuncType([choice_t], relay.FuncType([b], b), [b])
ft = infer_expr(top)
assert ft.checked_type == expected
def test_tuple():
tp = relay.TensorType((10,))
x = relay.var("x", tp)
res = relay.Tuple([x, x])
assert infer_expr(res).checked_type == relay.TupleType([tp, tp])
def test_ref():
x = relay.var("x", "float32")
y = relay.var("y", "float32")
r = relay.RefCreate(x)
st = relay.scalar_type("float32")
assert infer_expr(r).checked_type == relay.RefType(st)
g = relay.RefRead(r)
assert infer_expr(g).checked_type == st
w = relay.RefWrite(r, y)
assert infer_expr(w).checked_type == relay.TupleType([])
def test_free_expr():
x = relay.var("x", "float32")
y = relay.add(x, x)
yy = infer_expr(y)
assert tvm.ir.structural_equal(yy.args[0], x, map_free_vars=True)
assert yy.checked_type == relay.scalar_type("float32")
assert x.vid.same_as(yy.args[0].vid)
def test_type_args():
x = relay.var("x", shape=(10, 10))
y = relay.var("y", shape=(1, 10))
z = relay.add(x, y)
mod = infer_mod(IRModule.from_expr(z))
mod = infer_mod(mod, annotate_spans=False)
ty_args = mod["main"].body.type_args
assert len(ty_args) == 2
assert ty_args[0].dtype == "float32"
assert ty_args[1].dtype == "float32"
sh1 = ty_args[0].shape
sh2 = ty_args[1].shape
assert sh1[0].value == 10
assert sh1[1].value == 10
assert sh2[0].value == 1
assert sh2[1].value == 10
def test_global_var_recursion():
mod = tvm.IRModule({})
gv = relay.GlobalVar("main")
x = relay.var("x", shape=[])
tt = relay.scalar_type("float32")
func = relay.Function([x], relay.Call(gv, [x]), tt)
mod[gv] = func
mod = infer_mod(mod)
func_ty = mod["main"].checked_type
assert func_ty == relay.FuncType([tt], tt)
def test_equal():
i = relay.var("i", shape=[], dtype="int32")
eq = op.equal(i, relay.const(0, dtype="int32"))
func = relay.Function([i], eq)
ft = infer_expr(func)
expected = relay.FuncType([relay.scalar_type("int32")], relay.scalar_type("bool"))
assert ft.checked_type == expected
assert ft.checked_type == relay.FuncType(
[relay.scalar_type("int32")], relay.scalar_type("bool")
)
def test_constructor_type():
mod = tvm.IRModule()
box, constructor = initialize_box_adt(mod)
a = relay.TypeVar("a")
x = relay.Var("x", a)
func = relay.Function([x], constructor(x), box(a), [a])
mod["main"] = func
mod = infer_mod(mod)
func_ty = mod["main"].checked_type
box = mod.get_global_type_var("box")
expected = relay.FuncType([a], box(a), [a])
assert func_ty == expected
def test_constructor_call():
mod = tvm.IRModule()
box, constructor = initialize_box_adt(mod)
box_unit = constructor(relay.Tuple([]))
box_constant = constructor(relay.const(0, "float32"))
func = relay.Function([], relay.Tuple([box_unit, box_constant]))
mod["main"] = func
mod = infer_mod(mod)
ret_type = mod["main"].checked_type.ret_type.field |
s
box = mod.get_global_type_var("box")
expected1 = box(relay.TupleType([]))
expected2 = box(relay.TensorType((), "float32"))
assert ret_type[0] == expected1
assert ret_type[1] == expected2
def test_adt_match():
mod = tvm.IRModule()
box, constructor = initialize_box_adt(mod)
v = relay.Var("v", relay.TensorType((), "float32"))
match = relay.Match(
constructor(relay.const(0, "float32")),
[
relay.Clause(
relay.PatternConstructor(constructor, [relay.PatternVar(v)]), relay.Tuple([])
),
relay.Clause(relay.PatternWildcard(), relay.Tuple([])),
],
)
func = relay.Function([], match)
mod["main"] = func
mod = infer_mod(mod)
actual = mod["main"].checked_type.ret_type
assert actual == relay.TupleType([])
def test_adt_match_type_annotations():
mod = tvm.IRModule()
box, constructor = initialize_box_adt(mod)
tt = relay.TensorType((2, 2), "float32")
x = relay.Var("x")
mv = relay.Var("mv", tt)
match = relay.Match(
constructor(x),
[
relay.Clause(
relay.PatternConstructor(constructor, [relay.PatternVar(mv)]), relay.Tuple([])
)
],
)
mod["main"] = relay.Function([x], match)
mod = infer_mod(mod)
ft = mod["main"].checked_type
assert ft == relay.FuncType([tt], relay.TupleType([]))
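# `id` is bound by a Let to a polymorphic identity function, so it can be
# applied at two different types (int32 and the empty tuple) in the body.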
def test_let_polymorphism():
id = relay.Var("id")
xt = relay.TypeVar("xt")
x = relay.Var("x", xt)
body = relay.Tuple([id(relay.const(1)), id(relay.Tuple([]))])
body = relay.Let(id, relay.Function([x], x, xt, [xt]), body)
body = infer_expr(body)
int32 = relay.TensorType((), "int32")
tvm.ir.assert_structural_equal(body.checked_type, relay.TupleType([int32, relay.TupleType([])]))
def test_type_arg_infer():
code = """
def @id[A](%x: A) -> A {
%x
}
def @main(%f: float32) -> float32 {
@id(%f)
}
"""
mod = tvm.parser.fromtext(code)
mod = transform.InferType()(mod)
tvm.ir.assert_structural_equal(mod["main"].body.type_args, [relay.TensorType((), "float32")])
def test_dynamic_function():
dy_tt = relay.TensorType([relay.Any()], "float32")
s_tt = relay.TensorType([10], "float32")
x = relay.Var("x", dy_tt)
f = relay.Function([x], x + x)
y = relay.Var("y", s_tt)
c = f(y)
mod = tvm.IRModule()
mod["main"] = relay.Function([y], c)
mod = transform.InferType()(mod)
assert mod["main"].params[0].checked_type == s_tt
def test_custom_op_infer():
"""Tests infer type for custom_op"""
op_name = "custom_log"
_op.register(op_name, r"code(cal log of a tensor.)code")
_op.get(op_name).set_num_inputs(1)
_op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
_op.get(op_name).add_type_rel("Identity")
_op.get(op_name).set_support_level(1)
_op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
_op.register_stateful(op_name, False)
def clog(x):
return relay.Call(_op.get(op_name), [x])
tp = relay.TensorType((10, 10), "float32")
x = relay.var("x", tp)
sb = relay.ScopeBuilder()
t1 = sb.let("t1", clog(x))
t2 = sb.let("t2", relay.add(t1, x))
sb.ret(t2)
f = relay.Function([x], sb.get())
fchecked = infer_expr(f)
assert fchecked.checked_type == relay.FuncType([tp], tp)
def test_custom_add_broadcast_op():
"""Tests infer type for broadcast custom_op"""
op_name = "custom_broadcast_add"
    _op.register(op_name, r"code(Add two tensors with broadcasting.)code")
_op.get(op_name).set_num_inputs(2)
_op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
_op.get(op_name).add_argument("data_1", "Tensor", "The input data tensor.")
_op.get(op_name).add_type_rel("Broadcast")
_op.get(op_name).set_support_level(1)
_op.register_stateful(op_name, False)
def broadcast_add(x, y):
return relay.Call(_op.get(op_name), [x, y])
x = relay.var("x", shape=(10, 4))
    y = relay.var("y", shape=(5, 10, 1))
z = broadcast_add(x, y)
func = relay.Function([x, y], z)
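    # The built-in "Broadcast" relation applies NumPy-style broadcasting:
    # (10, 4) against (5, 10, 1) yields an output of shape (5, 10, 4).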
t1 = relay.TensorType((10, 4), "float32")
t2 = relay.TensorType((5, 10, 1), "float32")
t3 = relay.TensorType((5, 10, 4), "float32")
expected_ty = relay.FuncType([t1, t2], t3)
assert_has_type(func, expected_ty)
def test_custom_op_rel_infer():
"""Tests infer type for custom_op"""
def custom_log1_rel(arg_types, attrs):
assert len(arg_types) == 1, "type relation arg number mismatch!"
if attrs:
assert isinstance(attrs, DictAttrs)
inputa_type = arg_types[0]
return relay.TensorType(inputa_type.shape, inputa_type.dtype)
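    # A Python type relation receives the checked types of the call's inputs
    # (and the op's attrs) and returns the output type; echoing the input's
    # shape and dtype makes this behave like the built-in "Identity" relation.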
op_name = "custom_log1"
    _op.register(op_name, r"code(calculate the log of a tensor.)code")
_op.get(op_name).set_num_inputs(1)
_op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
_op.get(op_name).set_attrs_type_key("DictAttrs")
_op.get(op_name).add_type_rel("custom_log1", custom_log1_rel)
_op.get(op_name).set_support_level(1)
_op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
_op.register_stateful(op_name, False)
def clog(x):
return relay.Call(_op.get(op_name), [x])
tp = relay.TensorType((10, 10), "float32")
x = relay.var("x", tp)
sb = relay.ScopeBuilder()
t1 = sb.let("t1", clog(x))
t2 = sb.let("t2", relay.add(t1, x))
sb.ret(t2)
f = relay.Function([x], sb.get())
fchecked = infer_expr(f)
assert fchecked.checked_type == relay.FuncType([tp], tp)
def test_custom_op_rel_infer_exception():
"""Tests infer type for custom_op"""
def custom_log1_rel(arg_types, attrs):
assert len(arg_types) == 2, "type relation arg number mismatch!"
return None
op_name = "custom_log2"
    _op.register(op_name, r"code(calculate the log of a tensor.)code")
_op.get(op_name).set_num_inputs(1)
_op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
_op.get(op_name).set_attrs_type_key("DictAttrs")
    _op.get(op_name).add_type_rel("custom_log2", custom_log1_rel)
_op.get(op_name).set_support_level(1)
_op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
_op.register_stateful(op_name, False)
def clog(x):
return relay.Call(_op.get(op_name), [x])
tp = relay.TensorType((10, 10), "float32")
x = relay.var("x", tp)
sb = relay.ScopeBuilder()
t1 = sb.let("t1", clog(x))
t2 = sb.let("t2", relay.add(t1, x))
sb.ret(t2)
f = relay.Function([x], sb.get())
    with pytest.raises(tvm.error.TVMError) as cm:
        infer_expr(f)
    assert "type relation arg number mismatch" in str(cm.value)
def test_repeat_register():
op_name = "custom_log3"
    _op.register(op_name, r"code(calculate the log of a tensor.)code")
    with pytest.raises(tvm.error.TVMError) as cm:
        _op.register(op_name)
    assert "Operator custom_log3 is registered before" in str(cm.value)
def test_argreduce_infer_return_type():
x_shape = (1, 1)
broadcast_shape = [1, 1]
shape_dtypes = [("int32", lambda x: np.int32(x)), ("int64", lambda x: np.int64(x))]
for (sdtype, conv) in shape_dtypes:
x = relay.var("data", relay.TensorType(x_shape, "float32"))
broadcast_to = relay.op.broadcast_to(x, relay.const(broadcast_shape, dtype=sdtype))
argmax = relay.op.argmax(broadcast_to, axis=[1])
f = relay.Function([x], argmax)
assert_has_type(
f,
relay.FuncType(
[relay.TensorType(broadcast_shape, "float32")],
relay.TensorType([conv(1)], dtype=sdtype),
),
)
for (sdtype, conv) in shape_dtypes:
x = relay.var("data", relay.TensorType(x_shape, "float32"))
broadcast_to = relay.op.broadcast_to(x, relay.const(broadcast_shape, dtype=sdtype))
argmin = relay.op.argmin(broadcast_to, axis=[1])
f = relay.Function([x], argmin)
assert_has_type(
f,
relay.FuncType(
                [relay.TensorType(broadcast_shape, "float32")],
relay.TensorType([conv(1)], dtype=sdtype),
),
)
if __name__ == "__main__":
    import sys

    pytest.main(sys.argv)
import tvm
from tvm import relay
from tvm.relay import testing
import pytest
import numpy as np
def make_rel(name, args, num_inputs=None, attrs=None):
func = tvm.ir.EnvFunc.get("tvm.relay.type_relation." + name)
if num_inputs is None:
num_inputs = len(args) - 1
return relay.ty.TypeRelation(func, args, num_inputs, attrs)
def make_solver():
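    # The _test_type_solver FFI hook returns a dispatcher: calling it with a
    # method name yields the corresponding packed function, which is attached
    # as an attribute for convenience.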
solver = relay.analysis._ffi_api._test_type_solver()
solver.Solve = solver("Solve")
solver.Unify = solver("Unify")
solver.Resolve = solver("Resolve")
solver.AddConstraint = solver("AddConstraint")
def gen_type(name, args, out=None):
out = out if out else relay.ty.IncompleteType()
solver.AddConstraint(make_rel(name, args + [out]))
return out
solver.gen_type = gen_type
return solver
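# A minimal illustrative sketch (not part of the original suite) of how the
# helpers above fit together: gen_type registers a relation whose output
# starts as an IncompleteType, Solve() propagates shapes through the
# constraints, and Resolve() reads back the solved type.
def _example_broadcast_solving():
    solver = make_solver()
    lhs = relay.ty.TensorType((4, 1), "float32")
    rhs = relay.ty.TensorType((1, 8), "float32")
    out = solver.gen_type("Broadcast", [lhs, rhs])
    assert solver.Solve()
    assert solver.Resolve(out) == relay.ty.TensorType((4, 8), "float32")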
def test_bcast():
solver = make_solver()
t0 = relay.ty.TensorType((10, 20), "float32")
t1 = relay.ty.TensorType((10, 1), "float32")
tc = relay.ty.TensorType((10, 1, 1), "float32")
t2 = solver.gen_type("Broadcast", [t0, t1])
t3 = solver.gen_type("Identity", [t2])
t4 = solver.gen_type("Broadcast", [t3, tc])
assert solver.Solve()
assert solver.Resolve(t2) == relay.ty.TensorType((10, 20), "float32")
assert solver.Resolve(t4) == relay.ty.TensorType((10, 10, 20), "float32")
def test_backward_solving():
solver = make_solver()
t0 = relay.ty.TensorType((10, 20), "float32")
tc = relay.ty.TensorType((10, 1, 1), "float32")
t1 = relay.ty.IncompleteType()
t3 = solver.gen_type("Broadcast", [t0, t1])
t2 = solver.gen_type("Identity", [t1], out=tc)
assert solver.Solve()
assert solver.Resolve(t3) == relay.ty.TensorType((10, 10, 20), "float32")
def test_unify_tuple():
solver = make_solver()
t1 = relay.ty.IncompleteType()
t2 = relay.ty.IncompleteType()
t3 = relay.ty.TensorType((10, 20), "float32")
tup1 = relay.ty.TupleType([t1, t2])
tup2 = relay.ty.TupleType([t3, t3])
unified = solver.Unify(tup1, tup2)
assert unified == tup2
def test_unify_global_type_var():
solver = make_solver()
    gtv = relay.GlobalTypeVar("gtv")
unified = solver.Unify(gtv, gtv)
assert unified == gtv
def test_unify_typecall():
solver = make_solver()
gtv = relay.GlobalTypeVar("gtv")
t1 = relay.ty.IncompleteType()
t2 = relay.ty.IncompleteType()
t3 = relay.ty.TensorType((10, 20), "float32")
tc1 = relay.ty.TypeCall(gtv, [t1, t2])
tc2 = relay.ty.TypeCall(gtv, [t3, t3])
unified = solver.Unify(tc1, tc2)
assert unified == tc2
def test_unify_functype():
solver = make_solver()
t1 = relay.ty.IncompleteType()
t2 = relay.ty.IncompleteType()
t3 = relay.ty.IncompleteType()
unit = relay.ty.TupleType([])
tensor1 = relay.ty.TensorType((10, 20), "float32")
tensor2 = relay.ty.TensorType((10,), "float32")
ft1 = relay.ty.FuncType([t1, t2], t3)
ft2 = relay.ty.FuncType([tensor1, tensor2], unit)
unified = solver.Unify(ft1, ft2)
assert unified == ft2
def test_recursive_unify():
solver = make_solver()
t1 = relay.ty.IncompleteType()
t2 = relay.ty.IncompleteType()
t3 = relay.ty.IncompleteType()
tensor1 = relay.ty.TensorType((10, 10, 20), "float32")
tensor2 = relay.ty.TensorType((10, 20), "float32")
tensor3 = relay.ty.TensorType((10,), "float32")
tup1 = relay.ty.TupleType([relay.ty.TupleType([t1, t2]), t2])
tup2 = relay.ty.TupleType([relay.ty.TupleType([tensor1, tensor2]), tensor2])
ft1 = relay.ty.FuncType([tup1, t3], t3)
ft2 = relay.ty.FuncType([tup2, tensor3], tensor3)
unified = solver.Unify(ft1, ft2)
assert unified == ft2
def test_unify_vars_under_tuples():
solver = make_solver()
t1 = relay.ty.IncompleteType()
tup1 = relay.ty.TupleType([t1, t1])
unified = solver.Unify(tup1, tup1)
assert unified == tup1
t2 = relay.ty.IncompleteType()
tup2 = relay.ty.TupleType([t2, t2])
tup3 = relay.ty.TupleType([t1, t2])
tup4 = relay.ty.TupleType([t2, t1])
unified = solver.Unify(tup3, tup4)
assert unified == tup1 or unified == tup2
def test_binding_over_typevars(): |
solver = make_solver()
t1 = relay.ty.IncompleteType()
t2 = relay.ty.IncompleteType()
a = relay.ty.TypeVar("a")
b = relay.ty.TypeVar("b")
c = relay.ty.TypeVar("c")
d = relay.ty.TypeVar("d")
ft1 = relay.ty.FuncType([t1], t2, [c, d])
ft2 = relay.ty.FuncType([a], b, [a, b])
unified = solver.Unify(ft1, ft2)
assert unified == solver.Resolve(ft1)
def test_recursive_backward_solving():
solver = make_solver()
tensor1 = relay.ty.TensorType((10, 20), "float32")
tensor2 = relay.ty.TensorType((10, 1, 1), "float32")
tensor3 = relay.ty.TensorType((10,), "float32")
t1 = relay.ty.IncompleteType()
t2 = relay.ty.IncompleteType()
t3 = relay.ty.IncompleteType()
tup1 = relay.ty.TupleType([relay.ty.TupleType([tensor1, tensor2]), tensor3])
tup2 = relay.ty.TupleType([relay.ty.TupleType([t1, t2]), t3])
solver.gen_type("Identity", [tup1], out=tup2)
assert solver.Solve()
assert solver.Resolve(tup2) == tup1
def test_backward_solving_after_child_update():
solver = make_solver()
tensor1 = relay.ty.TensorType((10, 20), "float32")
tensor2 = relay.ty.TensorType((10, 1, 1), "float32")
t1 = relay.ty.IncompleteType()
t2 = relay.ty.IncompleteType()
t3 = relay.ty.IncompleteType()
tup1 = relay.ty.TupleType([t1, t2])
tup2 = relay.ty.TupleType([t1, t3])
tup_concrete = relay.ty.TupleType([tensor1, tensor2])
t4 = solver.gen_type("Identity", [tup1])
t5 = solver.gen_type("Identity", [tup2])
solver.gen_type("Identity", [t4], out=t5)
assert solver.Solve()
assert solver.Resolve(t3) == t3 or solver.Resolve(t3) == t2
assert solver.Resolve(t4) == tup1 or solver.Resolve(t4) == tup2
assert solver.Resolve(t5) == tup1 or solver.Resolve(t5) == tup2
solver.gen_type("Identity", [t1], out=tensor1)
solver.gen_type("Identity", [t2], out=tensor2)
assert solver.Solve()
assert solver.Resolve(t4) == tup_concrete
assert solver.Resolve(t5) == tup_concrete
def test_unify_quantified_funcs():
solver = make_solver()
a, b, c = relay.TypeVar("a"), relay.TypeVar("b"), relay.TypeVar("c")
ft1 = relay.FuncType([a, b], c, [a, b, c])
ft2 = relay.FuncType([a, a], a, [a])
unified = solver.Unify(ft1, ft2)
assert unified == ft2
ft3 = relay.FuncType([a], a, [a])
ft4 = relay.FuncType([b], c, [b, c])
unified = solver.Unify(ft3, ft4)
assert unified == ft3
def test_unify_quantified_func_and_concrete():
solver = make_solver()
a, b = relay.TypeVar("a"), relay.TypeVar("b")
ft1 = relay.FuncType([a], b, [a, b])
ft2 = relay.FuncType([b], relay.TupleType([]), [b])
unified = solver.Unify(ft1, ft2)
assert unified == ft2
def test_unify_quantified_funcs_nesting():
solver = make_solver()
a, b, c = relay.TypeVar("a"), relay.TypeVar("b"), relay.TypeVar("c")
ft1 = relay.FuncType([a, relay.TupleType([b, c])], relay.TupleType([a, b, c]), [a, b, c])
ft2 = relay.FuncType([a, relay.TupleType([a, a])], relay.TupleType([a, a, a]), [a])
unified = solver.Unify(ft1, ft2)
assert unified == ft2
def test_unify_quantified_funcs_var_order():
solver = make_solver()
a, b, c = relay.TypeVar("a"), relay.TypeVar("b"), relay.TypeVar("c")
ft1 = relay.FuncType([a, relay.TupleType([b, c])], relay.TupleType([a, b, c]), [a, b, c])
ft2 = relay.FuncType([a, relay.TupleType([a, c])], relay.TupleType([a, a, c]), [a, c])
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_incompatible_tuple_unification():
solver = make_solver()
t1 = relay.ty.IncompleteType()
t2 = relay.ty.IncompleteType()
tensor1 = relay.ty.TensorType((1, 2, 3), "float32")
tensor2 = relay.ty.TensorType((2, 3), "float32")
tensor3 = relay.ty.TensorType((3,), "float32")
tup1 = relay.ty.TupleType([relay.ty.TupleType([t1, t1]), t2])
tup2 = relay.ty.TupleType([relay.ty.TupleType([tensor1, tensor2]), tensor3])
solver.Unify(tup1, tup2)
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_bad_recursive_unification():
    solver = make_solver()
t1 = relay.ty.IncompleteType()
solver.Unify(t1, relay.ty.TupleType([t1, t1]))
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_unify_invalid_global_typevars():
solver = make_solver()
gtv1 = relay.GlobalTypeVar("gtv1")
gtv2 = relay.GlobalTypeVar("gtv2")
solver.Unify(gtv1, gtv2)
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_incompatible_typecall_var_unification():
solver = make_solver()
gtv1 = relay.GlobalTypeVar("gtv1")
gtv2 = relay.GlobalTypeVar("gtv2")
t1 = relay.IncompleteType()
t2 = relay.IncompleteType()
tc1 = relay.TypeCall(gtv1, [t1])
tc2 = relay.TypeCall(gtv2, [t2])
solver.Unify(tc1, tc2)
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_incompatible_typecall_args_unification():
solver = make_solver()
gtv = relay.GlobalTypeVar("gtv1")
t1 = relay.IncompleteType()
t2 = relay.IncompleteType()
tensor1 = relay.TensorType((1, 2, 3), "float32")
tensor2 = relay.TensorType((2, 3), "float32")
tensor3 = relay.TensorType((3,), "float32")
tc1 = relay.TypeCall(gtv, [relay.TupleType([t1, t1]), t2])
tc2 = relay.TypeCall(gtv, [relay.TupleType([tensor1, tensor2]), tensor3])
solver.Unify(tc1, tc2)
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_incompatible_quantified_func_unification():
solver = make_solver()
a, b, c = relay.TypeVar("a"), relay.TypeVar("b"), relay.TypeVar("c")
ft1 = relay.FuncType([a, b], c, [a, b, c])
ft2 = relay.FuncType([b, c], relay.TupleType([a]), [a, b, c])
solver.Unify(ft1, ft2)
def test_integer_compatibility_in_layout_transform():
x = relay.var("data", shape=(2, 3, 48, 48), dtype="float32")
conv_out = relay.nn.conv2d(
x,
relay.var("weight", shape=(1, 3, 1, 1), dtype="float32"),
strides=[47, 47],
channels=1,
kernel_size=[1, 1],
)
bias_out = relay.nn.bias_add(conv_out, relay.var("bias"))
    broadcast_out = relay.op.broadcast_to(bias_out, relay.const([2, 1, 2, 2], dtype="int64"))
y = relay.add(bias_out, broadcast_out)
mod, _ = testing.create_workload(y)
with tvm.transform.PassContext(opt_level=3):
with tvm.target.Target("llvm"):
mod = relay.transform.CanonicalizeOps()(mod)
mod = relay.transform.AlterOpLayout()(mod)
if __name__ == "__main__":
    import sys

    pytest.main(sys.argv)
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
def test_dup_type():
a = relay.TypeVar("a")
av = relay.Var("av", a)
make_id = relay.Function([av], relay.Tuple([av, av]), None, [a])
t = relay.scalar_type("float32")
b = relay.Var("b", t)
mod = tvm.IRModule.from_expr(make_id(b))
mod = transform.InferType()(mod)
inferred = mod["main"].body
assert inferred.checked_type == relay.TupleType([t, t])
def test_id_type():
mod = tvm.IRModule()
id_type = relay.GlobalTypeVar("id")
a = relay.TypeVar("a")
mod[id_type] = relay.TypeData(id_type, [a], [])
b = relay.TypeVar("b")
make_id = relay.Var("make_id", relay.FuncType([b], id_type(b), [b]))
t = relay.scalar_type("float32")
b = relay.Var("b", t)
mod["main"] = relay.Function([make_id, b], make_id(b))
mod = transform.InferType()(mod)
assert mod["main"].body.checked_type == id_type(t)
if __name__ == "__main__":
test_dup_type()
test_id_type()
|
"""
Testing for the pass that annotates used memory for each primitive
Relay function.
""" |
import pytest
import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprVisitor
def AnnotateUsedMemory():
return relay.transform._ffi_api.AnnotateUsedMemory() |
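# The expected values in the tests below are hand-computed byte counts:
# (number of elements) * (bytes per element) for every tensor that is live
# while a primitive function executes. An illustrative helper (hypothetical,
# not used by the tests, which inline the arithmetic):
def _tensor_size_in_bytes(shape, dtype="int8"):
    bytes_per_element = {"int8": 1, "int16": 2, "int32": 4, "uint32": 4}[dtype]
    size = bytes_per_element
    for dim in shape:
        size *= dim
    return size
# e.g. _tensor_size_in_bytes((1, 2, 2, 4)) == 16, so a function that reads
# and writes one such int8 tensor is annotated with 2 * 16 = 32 bytes.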
class CheckUsedMemoryAnnotation(ExprVisitor):
"""
Check that the annotations on each function in the graph match
what is expected.
"""
def __init__(self, expected_annotations, expected_io_annotation):
self.expected_annotations = expected_annotations
self.expected_io_annotation = expected_io_annotation
super().__init__()
def visit_function(self, fn):
if "Primitive" in fn.attrs:
assert (
"used_memory" in fn.attrs
), "Primitive function does not have used_memory annotation."
            assert (
                len(self.expected_annotations) > 0
            ), "Encountered more primitive functions than expected annotations"
expected_mem = self.expected_annotations.pop(0)
actual_mem = [int(x) for x in fn.attrs["used_memory"]]
assert expected_mem == actual_mem, (
f"Expected used memory annotation {expected_mem} "
f"did not match actual annotation {actual_mem}"
)
super().visit_function(fn)
def __call__(self, fn):
assert (
fn.attrs["io_used_memory"] == self.expected_io_annotation
), "Expected IO annotation did not match."
self.visit(fn.body)
def _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation):
mod = relay.transform.InferType()(mod)
mod = relay.transform.ToANormalForm()(mod)
mod = relay.transform.InferType()(mod)
mod = AnnotateUsedMemory()(mod)
CheckUsedMemoryAnnotation(expected_annotations, expected_io_annotation)(mod["main"])
def _create_primitive_function(expr):
func = relay.Function(relay.analysis.free_vars(expr), expr)
func = func.with_attr("Primitive", 1)
return func
def test_simple():
"""
Test simple graph with one primitive function.
"""
def get_inner_func():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.nn.max_pool2d(x)
x = _create_primitive_function(x)
return x
    ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
call = relay.Call(get_inner_func(), [ifm])
mod = tvm.IRModule.from_expr(call)
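    # The pooling function reads one (1, 2, 2, 4) int8 tensor (16 bytes) and
    # writes another of the same size, so 2 * 16 = 32 bytes are live.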
expected_annotations = [
[2 * (1 * 2 * 2 * 4)],
]
expected_io_annotation = 2 * (1 * 2 * 2 * 4)
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_multiple_functions():
"""
Test a graph with multiple primitive functions.
"""
def get_inner_func(ifm_shape):
x = relay.var("x", shape=ifm_shape, dtype="int8")
x = relay.nn.max_pool2d(x, pool_size=(2, 2), layout="NHWC")
x = _create_primitive_function(x)
return x
ifm = relay.var("input", shape=(1, 8, 8, 2), dtype="int8")
x = get_inner_func((1, 8, 8, 2))
x = relay.Call(x, [ifm])
y = get_inner_func((1, 7, 7, 2))
y = relay.Call(y, [x])
z = get_inner_func((1, 6, 6, 2))
z = relay.Call(z, [y])
mod = tvm.IRModule.from_expr(z)
expected_annotations = [
[(1 * 8 * 8 * 2) + (1 * 7 * 7 * 2)],
[(1 * 7 * 7 * 2) + (1 * 6 * 6 * 2)],
[(1 * 6 * 6 * 2) + (1 * 5 * 5 * 2)],
]
expected_io_annotation = (1 * 8 * 8 * 2) + (1 * 5 * 5 * 2)
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_mixed_data_types():
"""
Test a graph with a primitive function that has mixed datatypes.
"""
def get_inner_func():
x = relay.var("x", shape=(1, 2, 2, 2), dtype="int16")
x = relay.cast(x, dtype="uint32")
x = _create_primitive_function(x)
return x
ifm = relay.var("input", shape=(1, 2, 2, 2), dtype="int16")
x = get_inner_func()
x = relay.Call(x, [ifm])
mod = tvm.IRModule.from_expr(x)
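    # int16 elements occupy 2 bytes and uint32 elements 4 bytes, hence the
    # per-element multipliers in the expected byte counts below.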
expected_annotations = [
[(1 * 2 * 2 * 2) * 2 + (1 * 2 * 2 * 2) * 4],
]
expected_io_annotation = (1 * 2 * 2 * 2) * 2 + (1 * 2 * 2 * 2) * 4
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_parallel_function_call():
"""
    Test a graph where the results of two functions are concatenated
into a single result. The second function will also have the result
of the first function alive so will be annotated with a larger
"used memory" value.
"""
def get_inner_func():
x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
x = relay.reshape(x, newshape=(1, 4, 30))
x = _create_primitive_function(x)
return x
ifm = relay.var("input", shape=(1, 4, 5, 6), dtype="int8")
x = relay.Call(get_inner_func(), [ifm])
y = relay.Call(get_inner_func(), [ifm])
z = relay.concatenate([x, y], axis=0)
mod = tvm.IRModule.from_expr(z)
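    # First call: the input plus its own (1, 4, 30) output. Second call: the
    # same, plus the first call's output, which is still live for the
    # concatenate that follows.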
expected_annotations = [
[(1 * 4 * 5 * 6) + (1 * 4 * 30)],
[(1 * 4 * 5 * 6) + (1 * 4 * 30) + (1 * 4 * 30)],
]
expected_io_annotation = (1 * 4 * 5 * 6) + (1 * 4 * 60)
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_many_different_parallel_calls():
"""
Test a graph that calls many different functions in parallel.
input
/ | \
prim_func_1 prim_func_2 prim_func_3
\ | /
prim_func_4
"""
def get_inner_func_1():
x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
x = relay.tanh(x)
x = _create_primitive_function(x)
return x
def get_inner_func_2():
x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
x = relay.nn.max_pool2d(x, pool_size=(1, 1), layout="NHWC")
x = _create_primitive_function(x)
return x
def get_inner_func_3():
x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
x = relay.abs(x)
x = relay.nn.relu(x)
x = relay.exp(x)
x = _create_primitive_function(x)
return x
def get_inner_func_4():
x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
y = relay.var("y", shape=(1, 4, 5, 6), dtype="int8")
z = relay.var("z", shape=(1, 4, 5, 6), dtype="int8")
        out = relay.concatenate([x, y, z], axis=3)
out = _create_primitive_function(out)
return out
ifm = relay.var("input", shape=(1, 4, 5, 6), dtype="int8")
x = relay.Call(get_inner_func_1(), [ifm])
y = relay.Call(get_inner_func_2(), [ifm])
z = relay.Call(get_inner_func_3(), [ifm])
a = relay.Call(get_inner_func_4(), [x, y, z])
mod = tvm.IRModule.from_expr(a)
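    # Each of the parallel calls must keep the previously computed results
    # alive, so the annotations grow by one (1 * 4 * 5 * 6) buffer per call;
    # the concatenating function sees all three inputs plus its own
    # (1, 4, 5, 18) output.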
expected_annotations = [
[(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6)],
[(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 6)],
[(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 6)],
[(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 18)],
]
expected_io_annotation = (1 * 4 * 5 * 6) + (1 * 4 * 5 * 18)
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_nested_branches():
"""
Tests a graph with branches that also branch.
input
/ \
/ \
prim_func_1 prim_func_2
/ \
/ \
prim_func_3 prim_func_4
"""
def get_generic_inner_func():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.nn.relu(x)
return _create_primitive_function(x)
ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
a = relay.Call(get_generic_inner_func(), [ifm])
b = relay.Call(get_generic_inner_func(), [ifm])
c = relay.Call(get_generic_inner_func(), [b])
d = relay.Call(get_generic_inner_func(), [b])
out = relay.concatenate([a, c, d], axis=3)
mod = tvm.IRModule.from_expr(out)
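    # Annotations grow as more intermediate results stay live: b's output is
    # needed by both c and d, and a's output survives until the final
    # concatenate, so later functions see progressively more live buffers.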
expected_annotations = [
[(1 * 2 * 2 * 4) + (1 * 2 * 2 * 4)],
[(1 * 2 * 2 * 4) + (1 * 2 * 2 * 4) + (1 * 2 * 2 * 4)],
[(1 * 2 * 2 * 4) + (1 * 2 * 2 * 4) + (1 * 2 * 2 * 4)],
[(1 * 2 * 2 * 4) + (1 * 2 * 2 * 4) + (1 * 2 * 2 * 4) + (1 * 2 * 2 * 4)],
]
expected_io_annotation = (1 * 2 * 2 * 4) + (1 * 2 * 2 * 12) |
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_composite_inner_function():
"""
Tests the typical BYOC use case where a primitive function
contains a composite function.
"""
def get_inner_func():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.nn.max_pool2d(x, pool_size=(2, 2), layout="NHWC")
x = relay.Function(relay.analysis.free_vars(x), x)
x = x.with_attr("Composite", "my_composite_func")
y = relay.var("y", shape=(1, 2, 2, 4), dtype="int8")
z = relay.Call(x, [y])
return _create_primitive_function(z)
ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
x = relay.Call(get_inner_func(), [ifm])
mod = tvm.IRModule.from_expr(x)
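    # The 2x2 pooling collapses the spatial dimensions, so the composite
    # writes a (1, 1, 1, 4) output: 16 input bytes plus 4 output bytes.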
expected_annotations = [
[(1 * 2 * 2 * 4) + (1 * 1 * 1 * 4)],
]
expected_io_annotation = (1 * 2 * 2 * 4) + (1 * 1 * 1 * 4)
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_multiple_calls_to_same_function():
"""
Tests the case when there are multiple calls to the same function.
"""
def get_inner_func():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.nn.max_pool2d(x)
x = _create_primitive_function(x)
return x
inner_func = get_inner_func()
ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
call1 = relay.Call(inner_func, [ifm])
call2 = relay.Call(inner_func, [call1])
mod = tvm.IRModule.from_expr(call2)
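    # The shared primitive function is annotated with one used_memory entry
    # per call site; both sites keep 2 * 16 = 32 bytes live.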
expected_annotations = [[2 * (1 * 2 * 2 * 4), 2 * (1 * 2 * 2 * 4)]]
expected_io_annotation = 2 * (1 * 2 * 2 * 4)
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_parallel_calls_to_same_function():
"""
Test parallel calls to the same function.
"""
def get_inner_func():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.nn.max_pool2d(x)
x = _create_primitive_function(x) |
return x
inner_func = get_inner_func()
ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
call1 = relay.Call(inner_func, [ifm])
call2 = relay.Call(inner_func, [ifm])
concat = relay.concatenate([call1, call2], axis=0)
mod = tvm.IRModule.from_expr(concat)
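    # At the second call site the first call's output is still live, so its
    # entry is the input plus two outputs (3 * 16 bytes) rather than 2 * 16.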
expected_annotations = [[2 * (1 * 2 * 2 * 4), 3 * (1 * 2 * 2 * 4)]]
expected_io_annotation = 3 * (1 * 2 * 2 * 4)
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_parallel_calls_with_non_ifm_input():
"""
Test a graph that calls many different functions in parallel where
the input is not the input to the function.
y = f(x)
/ | \
z0 = g0(y) ... zi = gi(y)
\ | /
concat
"""
def get_inner_func_1():
x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
x = relay.tanh(x)
x = _create_primitive_function(x)
return x
def get_inner_func_2():
x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
x = relay.nn.max_pool2d(x, pool_size=(2, 2))
x = _create_primitive_function(x)
return x
ifm = relay.var("input", shape=(1, 4, 5, 6), dtype="int8")
y = relay.Call(get_inner_func_1(), [ifm])
g = get_inner_func_2()
no_calls = 20
z = [relay.Call(g, [y]) for _ in range(0, no_calls)]
out = relay.concatenate(z, axis=3)
mod = tvm.IRModule.from_expr(out)
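    # g's used_memory list has one entry per call site: the shared input y
    # (1 * 4 * 5 * 6 bytes) plus the i pooled (1, 4, 4, 5) outputs that are
    # live by the time the i-th call runs.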
expected_annotations = [
[(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6)],
[(1 * 4 * 5 * 6) + (1 * 4 * 4 * 5) * i for i in range(1, no_calls + 1)],
]
expected_io_annotation = (1 * 4 * 5 * 6) + (1 * 4 * 4 * (5 * no_calls))
_check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_dynamic_io_tensor_not_supported():
"""
Test to check dynamic IO tensor error.
"""
def get_inner_func():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        x = relay.nn.max_pool2d(x)
x = _create_primitive_function(x)
return x
ifm = relay.var("input", shape=(1, 2, 2, relay.Any()), dtype="int8")
call = relay.Call(get_inner_func(), [ifm])
mod = tvm.IRModule.from_expr(call)
err_rgx = r"AnnotateUsedMemory does not support dynamic shapes"
with pytest.raises(tvm.TVMError, match=err_rgx):
_check_used_memory_annotations(mod, [], [])
def test_dynamic_callsite_tensor_not_supported():
"""
Test to check dynamic callsite tensor error.
"""
def get_inner_func():
x = relay.var("x", shape=(relay.Any(), 2, 2, 4), dtype="int8")
x = relay.nn.max_pool2d(x)
x = _create_primitive_function(x)
return x
ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
call = relay.Call(get_inner_func(), [ifm])
mod = tvm.IRModule.from_expr(call)
err_rgx = r"AnnotateUsedMemory does not support dynamic shapes"
with pytest.raises(tvm.TVMError, match=err_rgx):
_check_used_memory_annotations(mod, [], []) |
import numpy as np |
import pytest |
import time
from unittest.mock import patch
import tvm
from tvm import runtime
from tvm import relay, IRModule
from tvm.relay.backend import vm
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay.prelude import Prelude
from tvm.relay.loops import while_loop
from tvm.relay |