from collections import OrderedDict
import os
import pathlib
import re
import tarfile

import numpy as np
import pytest

import tvm
import tvm.testing
from tvm import relay, TVMError
from tvm.contrib import utils
from tvm.ir.module import IRModule
from tvm.relay import testing, transform
from tvm.relay.testing import byoc
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.backend import Executor, Runtime
from tvm.micro import model_library_format as mlf
from tvm.micro import export_model_library_format
from tvm.ir.instrument import pass_instrument
from tvm.testing.aot import (
AOTTestModel,
generate_ref_data,
compile_and_run,
compile_models,
create_relay_module_and_inputs_from_tflite_file,
)
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER, parametrize_aot_options
from tvm.micro.testing.utils import get_conv2d_relay_module
def test_error_c_interface_with_packed_api():
"""Checks that an error occurs when using the packed API in combination with C interface"""
interface_api = "c"
use_unpacked_api = False
test_runner = AOT_DEFAULT_RUNNER
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], two)
with pytest.raises(
tvm.TVMError,
match=re.escape(
'Either need interface_api == "packed" (got: c) or '
"unpacked-api == true (got: 0) when targeting "
"c runtime"
),
):
compile_and_run(
AOTTestModel(
module=IRModule.from_expr(func), inputs={}, outputs=generate_ref_data(func, {})
),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_conv_with_params(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of convolution with parameters"""
mod = get_conv2d_relay_module()
main_func = mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
weight_data = np.ones(shape_dict["weight"]).astype(type_dict["weight"])
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params = {"weight": weight_data}
inputs = {"data": input_data}
output_list = generate_ref_data(mod, inputs, params)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_add_with_params(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of add with parameters"""
input_x = relay.var("x", shape=(1, 10))
input_y = relay.var("y", shape=(1, 10))
input_z = relay.add(input_x, input_y)
func = relay.Function([input_x, input_y], input_z)
input_x_data = np.ones((1, 10)).astype("float32")
input_y_data = np.random.uniform(size=(1, 10)).astype("float32")
params = {"x": input_x_data}
inputs = {"y": input_y_data}
output_list = generate_ref_data(func, inputs, params)
compile_and_run(
AOTTestModel(
module=IRModule.from_expr(func),
inputs=inputs,
outputs=output_list,
params=params,
),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
@pytest.mark.parametrize("groups,weight_shape", [(1, 32), (32, 1)])
def test_conv2d(interface_api, use_unpacked_api, test_runner, groups, weight_shape):
"""Test a subgraph with a single conv2d operator."""
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, weight_shape, 3, 3)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=wshape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups)
main_f = relay.Function([data0, weight0], out)
mod = tvm.IRModule()
mod["main"] = main_f
mod = transform.InferType()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
output_list = generate_ref_data(mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
def test_packed_global_variables():
"""Check packed global variables in codegen output."""
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, 32, 3, 3)
interface_api = "packed"
use_unpacked_api = False
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=wshape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=1)
main_f = relay.Function([data0, weight0], out)
mod = tvm.IRModule()
mod["main"] = main_f
mod = transform.InferType()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
output_list = generate_ref_data(mod, inputs)
compiled_models_list = compile_models(
models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
workspace_byte_alignment=8,
enable_op_fusion=True,
pass_config=AOT_DEFAULT_RUNNER.pass_config,
use_runtime_executor=True,
target=tvm.target.Target("c"),
)
compiled_model = compiled_models_list[0]
tmp_path = utils.tempdir()
base_path = tmp_path.temp_dir
model = compiled_model.model
tar_file = os.path.join(base_path, f"{model.name}.tar")
export_model_library_format(compiled_model.executor_factory, tar_file)
t = tarfile.open(tar_file)
t.extractall(base_path)
file_list = []
for path in (pathlib.Path(base_path) / "codegen" / "host" / "src").iterdir():
if path.is_file():
file_list.append(path)
assert len(file_list) > 0
for path in file_list:
with open(path, "r") as lib_f:
lib1 = lib_f.readlines()
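# Gather every "tvmgen_default..." token from the generated source; for call
# sites, also record the callee name before "(" so the assertion below can
# check that no "<func>_packed" variant of a generated function was emitted.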
tvmgen_names = []
tvmgen_funcs = []
for line in lib1:
for item in line.split(" "):
if item.startswith("tvmgen_default"):
tvmgen_names.append(item)
tvmgen_funcs += re.findall(r"(?<=).*(?=\()", item)
for func in tvmgen_funcs:
assert f"{func}_packed" not in tvmgen_names
@parametrize_aot_options
def test_concatenate(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of concatenate"""
dtype = "float32"
input_x = relay.var("x", shape=(10, 5), dtype=d |
type)
input_y = relay.var("y", shape=(10, 5), dtype=dtype)
input_z = relay.var("z", shape=(), dtype=dtype)
concat_inputs = relay.concatenate((input_x, input_y), axis=1)
func_output = relay.add(input_z, concat_inputs)
func = relay.Function([input_x, input_y, input_z], func_output)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
inputs = OrderedDict([("x", x_data), ("y", y_data), ("z", t_data)])
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_nested_tuples(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of functions with nested tuple outputs"""
input_x = relay.var("x", shape=(10,))
output_1 = input_x + relay.const(1.0)
output_2 = output_1 + relay.const(1.0)
output_3 = output_2 + relay.const(1.0)
output_4 = output_3 + relay.const(1.0)
full_output = relay.Tuple(
[output_1, relay.Tuple([relay.Tuple([output_2, output_3]), output_4])]
)
func = relay.Function([input_x], full_output)
x_data = np.random.uniform(size=(10,)).astype(np.float32)
inputs = {"x": x_data}
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_tuple_getitem(interface_api, use_unpacked_api, test_runner):
func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))
output_list = generate_ref_data(func, {})
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs={}, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_id(interface_api, use_unpacked_api, test_runner):
x = relay.var("x", "float32")
ident = relay.Function([x], x)
one = np.array(1.0, "float32")
inputs = {"x": one}
output_list = generate_ref_data(ident, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(ident), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_add_const(interface_api, use_unpacked_api, test_runner):
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], two)
output_list = generate_ref_data(func, {})
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs={}, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_multiply(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of multiply"""
x = relay.var("x", shape=(10, 10))
y = relay.var("y", shape=(1, 10))
func = relay.Function([x, y], relay.multiply(x, y))
x_data = np.random.rand(10, 10).astype("float32")
y_data = np.random.rand(1, 10).astype("float32")
inputs = OrderedDict([("x", x_data), ("y", y_data)])
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_subtract(interface_api, use_unpacked_api, test_runner):
i = relay.var("i", shape=[], dtype="int32")
sub = relay.subtract(i, relay.const(1, dtype="int32"))
func = relay.Function([i], sub, ret_type=relay.TensorType([], "int32"))
i_data = np.array(1, dtype="int32")
inputs = {"i": i_data}
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_tuple_output(interface_api, use_unpacked_api, test_runner):
"""Tests getting items from tuples"""
x = relay.var("x", shape=(6, 9))
y = relay.split(x, 3).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
out = relay.Tuple([a, b])
func = relay.Function([x], out)
x_data = np.random.rand(6, 9).astype("float32")
inputs = {"x": x_data}
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@pytest.mark.parametrize(
["debug_calculated_workspaces", "workspace_byte_alignment"], [(True, 1), (True, 16), (False, 1)]
)
def test_mobilenet(debug_calculated_workspaces, workspace_byte_alignment):
"""Full network test with Mobilenet"""
use_unpacked_api = True
interface_api = "c"
test_runner = AOT_DEFAULT_RUNNER
debugging_memory_overhead = 1024 * 1024
mod, params = testing.mobilenet.get_workload(batch_size=1)
data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
data = np.random.uniform(size=data_shape).astype("float32")
inputs = {"data": data}
output_list = generate_ref_data(mod, inputs, params)
compile_and_run(
AOTTestModel(
module=mod,
inputs=inputs,
outputs=output_list,
params=params,
extra_memory_in_bytes=debugging_memory_overhead,
),
test_runner,
interface_api,
use_unpacked_api,
workspace_byte_alignment=workspace_byte_alignment,
debug_calculated_workspaces=debug_calculated_workspaces,
)
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm(merge_compiler_regions):
"""
This is a simple test to check BYOC capabilities of AOT
with and without merging compiler regions to test for https:
"""
use_unpacked_api = False
interface_api = "packed"
test_runner = AOT_DEFAULT_RUNNER
input_x = relay.var("x", shape=(10, 10))
input_w0 = relay.var("w0", shape=(10, 10))
input_w1 = relay.var("w1", shape=(10, 10))
marked_input_x = compiler_begin(input_x, "ccompiler")
marked_input_w0 = compiler_begin(input_w0, "ccompiler")
add_x_and_w0 = relay.add(marked_input_x, marked_input_w0)
end_inner_add = compiler_end(add_x_and_w0, "ccompiler")
marked_inner_add = compiler_begin(end_inner_add, "ccompiler")
marked_w1 = compiler_begin(input_w1, "ccompiler")
add_nested_and_w1 = relay.add(marked_inner_add, marked_w1)
end_outer_add = compiler_end(add_nested_and_w1, "ccompiler")
final_add = relay.add(end_inner_add, end_outer_add)
relay_func = relay.Function([input_x, input_w0, input_w1], final_add)
mod = tvm.IRModule()
mod["main"] = relay_func
if merge_compiler_regions:
mod = transform.MergeCompilerRegions()(mod)
mod = transform.PartitionGraph("mod_name")(mod)
mod = transform.InferType()(mod)
x_data = [("x", np.random.rand(10, 10).astype("float32"))]
w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32")) for i in range(2)]
map_inputs = OrderedDict(x_data + w_data)
output_list = generate_ref_data(mod, map_inputs)
compile_and_run(
AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm_multiple_subgraphs(merge_compiler_regions):
"""This is a test case to check BYOC capabilities of AOT with multiple sub graphs"""
use_unpacked_api = False
interface_api = "packed"
test_runner = AOT_DEFAULT_RUNNER
input_x = relay.var("x", shape=(10, 10))
input_w0 = relay.var("w0", shape=(10, 10))
input_w1 = relay.var("w1", |
shape=(10, 10))
input_w2 = relay.var("w2", shape=(10, 10))
input_w3 = relay.var("w3", shape=(10, 10))
input_w4 = relay.var("w4", shape=(10, 10))
input_w5 = relay.var("w5", shape=(10, 10))
input_w6 = relay.var("w6", shape=(10, 10))
input_w7 = relay.var("w7", shape=(10, 10))
ccompiler_add_1 = relay.add(input_x, input_w0)
ccompiler_sub_1 = relay.subtract(ccompiler_add_1, input_w1)
ccompiler_mul_1 = relay.multiply(ccompiler_sub_1, input_w2)
ccompiler_add_2 = relay.add(input_x, input_w3)
ccompiler_sub_2 = relay.subtract(ccompiler_add_2, input_w4)
ccompiler_mul_2 = relay.multiply(ccompiler_sub_2, input_w5)
tvm_add = relay.add(input_x, input_w6)
tvm_sub = relay.subtract(tvm_add, input_w7)
concat_outputs = relay.concatenate((ccompiler_mul_1, ccompiler_mul_2, tvm_sub), axis=0)
relay_func = relay.Function(
[input_x, input_w0, input_w1, input_w2, input_w3, input_w4, input_w5, input_w6, input_w7],
concat_outputs,
)
mod = tvm.IRModule()
ann = byoc.CcompilerAnnotator()
mod["main"] = ann.visit(relay_func)
if merge_compiler_regions:
mod = transform.MergeCompilerRegions()(mod)
mod = tvm.relay.transform.PartitionGraph("mod_name")(mod)
mod = tvm.relay.transform.InferType()(mod)
x_data = np.random.rand(10, 10).astype("float32")
w_data = []
for _ in range(8):
w_data.append(np.random.rand(10, 10).astype("float32"))
map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i]) for i in range(8)])
output_list = generate_ref_data(mod, map_inputs)
input_list = [map_inputs["x"]]
input_list.extend([map_inputs["w{}".format(i)] for i in range(8)])
compile_and_run(
AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_add_name_mangling_with_params(interface_api, use_unpacked_api, test_runner):
"""Checks name mang |
ling works with parameters"""
input_x = relay.var("x", shape=(1, 10))
input_y = relay.var("y", shape=(1, 10))
func_add = relay.add(input_x, input_y)
relay_func = relay.Function([input_x, input_y], func_add)
x_in = np.ones((1, 10)).astype("float32")
y_in = np.random.uniform(size=(1, 10)).astype("float32")
params = {"x": x_in}
inputs = {"y": y_in}
output_list = generate_ref_data(relay_func, inputs, params)
compile_and_run(
AOTTestModel(
name="my_mod",
module=relay_func,
inputs=inputs,
outputs=output_list,
params=params,
),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_multiple_models(interface_api, use_unpacked_api, test_runner):
"""Compiles multiple models to ensure both can be compiled into one output"""
x = relay.var("x", "float32")
mod1 = relay.Function([x], x)
one = np.array(1.0, "float32")
inputs1 = {"x": one}
output_list1 = generate_ref_data(mod1, inputs1)
params1 = None
mod2 = get_conv2d_relay_module()
main_func = mod2["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
weight_data = np.ones(shape_dict["weight"]).astype(type_dict["weight"])
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params2 = {"weight": weight_data}
inputs2 = {"data": input_data}
output_list2 = generate_ref_data(mod2, inputs2, params2)
compile_and_run(
[
AOTTestModel(
name="mod1",
module=mod1,
inputs=inputs1,
outputs=output_list1,
params=params1,
),
AOTTestModel(
name="mod2",
module=mod2,
inputs=inputs2,
outputs=output_list2,
params=params2,
),
],
test_runner,
interface_api,
use_unpacked_api,
)
def test_quant_mobilenet_tfl():
"""Since in AOT we pass directly the output buffer from the user,
in quantized networks sharing the output buffers is not possible.
This is because the output data type is int8 and the intermediate
buffer are int32 or int16. We use mobilenet quantized to stress this
situation and verify that the output buffer sharing is disabled in AOT."""
pytest.importorskip("tflite") |
import tvm.relay.testing.tf as tf_testing
use_unpacked_api = True
interface_api = "c"
test_runner = AOT_DEFAULT_RUNNER
tflite_model_file = tf_testing.get_workload_official(
"https:
"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"mobilenet_v1_1.0_224_quant.tflite",
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_transpose(interface_api, use_unpacked_api, test_runner):
"""Test that non-inpleaceable operations (e.g., transpose) do not happen in-place."""
dtype = "float32"
input_x = relay.var("x", shape=(10, 5), dtype=dtype)
input_y = relay.var("y", shape=(10, 5), dtype=dtype)
input_z = relay.var("z", shape=(), dtype=dtype)
first_add = relay.add(input_x, input_y)
transpose_add = relay.transpose(first_add)
final_add = relay.add(transpose_add, input_z)
relay_func = relay.Function([input_x, input_y, input_z], final_add)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
inputs = {"x": x_data, "y": y_data, "z": t_data}
output_list = generate_ref_data(relay_func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(relay_func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
enable_op_fusion=False,
)
def test_name_sanitiser():
"""Test that input tensors with special characters in the name don't break compilation"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_DEFAULT_RUNNER
func = relay.var("input-x::2", "float32")
ident = relay.Function([func], func)
one = np.array(1.0, "float32")
inputs = {"input-x::2": one}
output_list = generate_ref_data(ident, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
enable_op_fusion=False,
)
def test_name_sanitiser_name_clash():
"""Test that 2 input tensors with names that clash once sanitized, generates an error"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_DEFAULT_RUNNER
dtype = "float32"
input_non_clashing = relay.var("input::-1", shape=(10, 5), dtype=dtype)
input_clashing_1 = relay.var("input::-2", shape=(10, 5), dtype=dtype)
input_clashing_2 = relay.var("input:--2", shape=(), dtype=dtype)
inner_add = relay.add(input_non_clashing, input_clashing_1)
transpose_add = relay.transpose(inner_add)
final_add = relay.add(transpose_add, input_clashing_2)
func = relay.Function([input_non_clashing, input_clashing_1, input_clashing_2], final_add)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
inputs = {"input::-1": x_data, "input::-2": y_data, "input:--2": t_data}
output_list = generate_ref_data(func, inputs)
with pytest.raises(TVMError, match="Sanitized input tensor name clash"):
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
enable_op_fusion=False,
)
def test_aot_codegen_backend_alloc_workspace_calls():
"""This test checks whether AoT lowering creates TVMBackendAllocWorkspace calls"""
relay_mod = tvm.parser.fromtext(
"""
def @main(%data: Tensor[(1, 4, 4, 4), float32], %weight: Tensor[(4, 4, 3, 3), float32], src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 4, 4, 4), float32] {
%0 = fn (%p02: Tensor[(1, 4, 4, 4), float32], Primitive=1, hash="9332b3872fb5292c", src_layout="NCHW", dst_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {
layout_transform(%p02, src_layout="NCHW", dst_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */
};
%1 = fn (%p03: Tensor[(4, 4, 3, 3), float32], Primitive=1, hash="9f0b2b8a24a4dab3", src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 1, 3, 3, 4, 4), float32] {
layout_transform(%p03, src_layout="OIHW", dst_layout="OIHW4i4o") /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */
};
%2 = %0(%data) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;
%3 = %1(%weight) /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */;
%4 = fn (%p01: Tensor[(1, 1, 4, 4, 4), float32], %p1: Tensor[(1, 1, 3, 3, 4, 4), float32], out_layout="NCHW4c", kernel_layout="OIHW4i4o", Primitive=1, data_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {
nn.contrib_conv2d_NCHWc(%p01, %p1, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NCHW4c", kernel_layout="OIHW4i4o", out_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */
};
%5 = %4(%2, %3) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;
%6 = fn (%p0: Tensor[(1, 1, 4, 4, 4), float32], Primitive=1, src_layout="NCHW4c", dst_layout="NCHW") -> Tensor[(1, 4, 4, 4), float32] {
layout_transform(%p0, src_layout="NCHW4c", dst_layout="NCHW") /* ty=Tensor[(1, 4, 4, 4), float32] */
};
%6(%5) /* ty=Tensor[(1, 4, 4, 4), float32] */
}
"""
)
compiled_test_mods = compile_models(
models=AOTTestModel(module=relay_mod, inputs=None, outputs=None),
interface_api="c",
use_unpacked_api=True,
)
source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
assert source.count("TVMBackendAllocWorkspace") == 3
@pytest.mark.parametrize("constants_byte_alignment", [8, 16, 32])
def test_constants_alignment(constants_byte_alignment):
"""Test that constants_byte_alignment correctly sets constants byte alignment"""
use_unpacked_api = True
interface_api = "c"
mod, params = testing.mobilenet.get_workload(batch_size=1)
data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
data = np.random.uniform(size=data_shape).astype("float32")
inputs = {"data": data}
output_list = generate_ref_data(mod, inputs, params)
target = f"c -constants-byte-alignment={constants_byte_alignment}"
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api,
use_unpacked_api,
target=tvm.target.Target(target, host=target),
)
source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
assert f'__attribute__((section(".rodata.tvm"), aligned({constants_byte_alignment})))' in source
def test_output_tensor_names():
"""Test that the output names generated match those in the model"""
pytest.importorskip("tflite") |
import tensorflow as tf |
import tflite.Model
ifm_shape = (1, 299, 299, 3)
padding = "VALID"
strides = (1, 1)
dilation = (1, 1)
kernel_shape = (3, 2)
def create_tflite_graph_two_outs():
"""Create a model with 2 output tensors""" |
class Model(tf.Module):
"""Simple TFLite test model"""
@tf.function
def tf_function(self, tf_input_x):
"""Single TFLite function with two convolutions"""
tf_strides = [1, strides[0], strides[1], 1]
filter_shape = [kernel_shape[0], kernel_shape[1], 3, 3]
filter1 = tf.constant(
np.arange(np.prod(filter_shape)).reshape(filter_shape),
dtype=tf.float32,
)
first_conv2d = tf.nn.conv2d(
tf_input_x,
filters=filter1,
strides=tf_strides,
padding=padding,
dilations=dilation,
)
first_conv2d = tf.nn.relu(first_conv2d)
filter2 = tf.constant(
1000 + np.arange(np.prod(filter_shape)).reshape(filter_shape),
dtype=tf.float32,
)
second_conv2d = tf.nn.conv2d(
tf_input_x,
filters=filter2,
strides=strides,
padding=padding,
data_format="NHWC",
dilations=dilation,
)
second_conv2d = tf.nn.relu(second_conv2d)
return first_conv2d, second_conv2d
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
tflite_graph = create_tflite_graph_two_outs()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": "int8"},
)
use_unpacked_api = True
interface_api = "c"
test_runner = AOT_DEFAULT_RUNNER
in_min, in_max = (-128, 127)
data = np.random.randint(in_min, high=in_max, size=ifm_shape, dtype="int8")
input_name = mod["main"].params[0].name_hint
inputs = {input_name: data}
output_list = generate_ref_data(mod, inputs, params)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
test_runner,
interface_api,
use_unpacked_api,
)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api,
use_unpacked_api,
)
source = compiled_test_mods[0].executor_factory.lib.get_source()
for output_name in output_list.keys():
assert output_name in source
@pytest.mark.parametrize(
"workspace_byte_alignment,main_workspace_size",
[
(8, 14880),
(16, 14880),
(256, 15616),
],
)
def test_workspace_calculation(workspace_byte_alignment, main_workspace_size):
"""Checks calculated workspace against known values"""
mod, params = tvm.relay.testing.synthetic.get_workload()
target = "c"
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": workspace_byte_alignment,
},
)
with tvm.transform.PassContext(
opt_level=3,
config={
"tir.disable_vectorize": True,
},
):
lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
mlf_memory_map = mlf._build_function_memory_map(lib.function_metadata)
assert mlf_memory_map["main"][0]["workspace_size_bytes"] == main_workspace_size
@tvm.testing.requires_package("tflite")
@tvm.testing.requires_cmsisnn
def test_workspace_calculation_cmsis_nn():
"""This tests cmsis_nn codegen for workspace calculation.
This is tested specially because cmsis-nn codegen creates
multiple PrimFuncs per offloaded relay function in a
non-hierarchical manner."""
pytest.importorskip("tflite")
from tvm.relay.op.contrib import cmsisnn
from tvm.contrib.download import download_testdata
target = "c"
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": 16,
"interface-api": "c",
"unpacked-api": True,
},
)
base_url = (
"https://github.com/ARM-software/ML-zoo/raw/"
"48a22ee22325d15d2371a6df24eb7d67e21dcc97"
"/models/keyword_spotting/cnn_small/tflite_int8"
)
file_to_download = "cnn_s_quantized.tflite"
file_saved = "cnn_s_quantized_15Dec2021.tflite"
model_file = download_testdata("{}/{}".format(base_url, file_to_download), file_saved)
mod, _, params = create_relay_module_and_inputs_from_tflite_file(model_file)
mod = cmsisnn.partition_for_cmsisnn(mod, params)
with tvm.transform.PassContext(
opt_level=3,
config={
"tir.disable_vectorize": True,
},
):
lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
mlf_memory_map = mlf._build_function_memory_map(lib.function_metadata)
assert mlf_memory_map["main"][0]["workspace_size_bytes"] == 14256
def test_aot_codegen_checks_returns():
"""This test checks whether AoT lowering creates calls that check the return value correctly"""
input_x = relay.var("x", shape=(1, 10))
input_y = relay.var("y", shape=(1, 10))
func_add = relay.add(input_x, input_y)
func = relay.Function([input_x, input_y], func_add)
compiled_test_mods = compile_models(
models=AOTTestModel(module=IRModule.from_expr(func), inputs=None, outputs=None),
interface_api="c",
use_unpacked_api=True,
)
source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
main_ir_module = compiled_test_mods[0].executor_factory.lowered_ir_mods.items()[0][1]
main_func = main_ir_module["__tvm_main__"]
assert (
str(main_func.body[1])
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"tvmgen_default_fused_add",'
+ " x_buffer_var, y_buffer_var, output |
_buffer_var))\n"
)
assert (
"if (tvmgen_default_fused_add(x_buffer_var, y_buffer_var, output_buffer_var) != 0 ) return -1;"
in source
)
def test_aot_uses_anf():
"""Checks that A-Normal Form is being used in the AOT lowering pipeline."""
input_x = relay.var("x", shape=(1, 10, 10, 10))
input_y = relay.var("y", shape=(1, 10, 10, 10))
func_add = relay.add(input_x, input_y)
func = relay.Function([input_x, input_y], func_add)
@pass_instrument
class CheckANFRuns:
def __init__(self):
self.did_run_anf = False
def run_before_pass(self, _, info):
if info.name == "ToANormalForm":
self.did_run_anf = True
if info.name == "LowerTE":
assert self.did_run_anf, "ToANormalForm pass should run before LowerTE."
check_run_anf = CheckANFRuns()
model = AOTTestModel(module=IRModule.from_expr(func), inputs=None, outputs=None)
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": 8,
"interface-api": "c",
"unpacked-api": True,
},
)
config = {"tir.disable_vectorize": True}
with tvm.transform.PassContext(opt_level=3, config=config, instruments=[check_run_anf]):
tvm.relay.build(
model.module,
tvm.target.Target("c"),
executor=executor,
runtime=runtime,
workspace_memory_pools=None,
params=model.params,
mod_name=model.name,
)
assert check_run_anf.did_run_anf, "Expected ToANormalForm pass to have run."
if __name__ == "__main__":
tvm.testing.main()
"""This file contains tests that use USMP + AoT using C runtime APIs"""
from collections import OrderedDict
import re
import random

import numpy as np
import pytest

import tvm
import tvm.testing
from tvm import relay
from tvm.relay import testing
from tvm.relay import transform
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.backend import Executor, Runtime
from tvm import (
WorkspaceMemoryPools,
ConstantMemoryPools,
WorkspacePoolInfo,
ConstantPoolInfo,
PoolInfoProperties,
)
from tvm.micro import model_library_format as mlf
from tvm.micro.testing.aot_test_utils import parametrize_aot_options
from tvm.testing.aot import (
AOTTestModel,
AOTTestRunner,
generate_ref_data,
compile_and_run,
compile_models,
run_and_check,
create_relay_module_and_inputs_from_tflite_file,
)
from tvm.testing.usmp import is_tvm_backendallocworkspace_calls
def _check_for_no_tvm_backendallocworkspace_calls(mod: tvm.runtime.Module):
assert (
is_tvm_backendallocworkspace_calls(mod) is False
), "This is failing because USMP was unable to plan for every tir.allocate node."
@parametrize_aot_options
def test_synthetic(interface_api, use_unpacked_api, test_runner):
"""
Simple U1 usecase test
"""
mod, params = tvm.relay.testing.synthetic.get_workload()
main_func = mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params = {}
for name, _ in shape_dict.items():
if name != "data":
params[name] = np.ones(shape_dict[name]).astype(type_dict[name])
inputs = {"data": input_data}
output_list = generate_ref_data(mod, inputs, params)
config = (
{
"tir.disable_vectorize": True,
"tir.disable_storage_rewrite": True,
"tir.usmp.enable": True,
"tir.usmp.algorithm": "greedy_by_conflicts",
},
)
test_runner = AOTTestRunner(
makefile=test_runner.makefile,
prologue=test_runner.prologue,
epilogue=test_runner.epilogue,
includes=test_runner.includes,
parameters=test_runner.parameters,
pass_config={**test_runner.pass_config},
)
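# config above is a one-element tuple, so unpacking it passes the single
# pass-config dict straight to dict.update().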
test_runner.pass_config.update(*config)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
test_runner,
interface_api,
use_unpacked_api,
)
@pytest.mark.parametrize(
"workspace_byte_alignment,constant_byte_alignment,"
"main_workspace_size,main_constant_size,usmp_algo",
[
(8, 8, 14208, 948, "greedy_by_conflicts"),
(16, 8, 14208, 948, "greedy_by_conflicts"),
(256, 8, 14720, 948, "greedy_by_conflicts"),
(8, 16, 14208, 956, "greedy_by_conflicts"),
(16, 16, 14208, 956, "greedy_by_conflicts"),
(256, 16, 14720, 956, "greedy_by_conflicts"),
(8, 256, 14208, 1804, "greedy_by_conflicts"),
(16, 256, 14208, 1804, "greedy_by_conflicts"),
(256, 256, 14720, 1804, "greedy_by_conflicts"),
(8, 8, 18576, 948, "greedy_by_size"),
(16, 8, 18576, 948, "greedy_by_size"),
(256, 8, 19392, 948, "greedy_by_size"),
(8, 16, 18576, 956, "greedy_by_size"),
(16, 16, 18576, 956, "greedy_by_size"),
(256, 16, 19392, 956, "greedy_by_size"),
(8, 256, 18576, 1804, "greedy_by_size"),
(16, 256, 18576, 1804, "greedy_by_size"),
(256, 256, 19392, 1804, "greedy_by_size"),
(8, 8, 11424, 948, "hill_climb"),
(16, 8, 11424, 948, "hill_climb"),
(256, 8, 11920, 948, "hill_climb"),
(8, 16, 11424, 956, "hill_climb"),
(16, 16, 11424, 956, "hill_climb"),
(256, 16, 11920, 956, "hill_climb"),
(8, 256, 11424, 1804, "hill_climb"),
(16, 256, 11424, 1804, "hill_climb"),
(256, 256, 11920, 1804, "hill_climb"),
],
)
def test_memory_planning(
workspace_byte_alignment,
constant_byte_alignment,
main_workspace_size,
main_constant_size,
usmp_algo,
):
"""Checks calculated workspace against known values"""
random.seed(0)
mod, params = tvm.relay.testing.synthetic.get_workload()
target = "c"
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": workspace_byte_alignment,
"constant-byte-alignment": constant_byte_alignment,
},
)
with tvm.transform.PassContext(
opt_level=3,
config={
"tir.disable_vectorize": True,
"tir.disable_storage_rewrite": True,
"tir.usmp.enable": True,
"tir.usmp.algorithm": usmp_algo,
},
):
lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
assert (
sum(lib.function_metadata["__tvm_main__"].workspace_sizes.values()) == main_workspace_size
)
assert sum(lib.function_metadata["__tvm_main__"].constant_sizes.values()) == main_constant_size
@parametrize_aot_options
@pytest.mark.parametrize("groups,weight_shape", [(1, 32), (32, 1)])
def test_conv2d(interface_api, use_unpacked_api, test_runner, groups, weight_shape):
"""Test a subgraph with a single conv2d operator."""
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, weight_shape, 3, 3)
pass_config = {"tir.usmp.enable": True}
test_runner = AOTTestRunner(
makefile=test_runner.makefile,
prologue=test_runner.prologue,
epilogue=test_runner.epilogue,
includes=test_runner.includes,
parameters=test_runner.parameters,
pass_config=pass_config,
)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=wshape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups)
main_f = relay.Function([data0, weight0], out)
mod = tvm.IRModule()
mod["main"] = main_f
mod = transform.InferType()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
output_list = generate_ref_data(mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
compiled_test_mods = compile_models(
models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm(merge_compiler_regions):
"""
This is a simple test to check BYOC capabilities of AOT
with and without merging compiler regions to test for https:
"""
use_unpacked_api = False
interface_api = "packed"
test_runner = AOTTestRunner(pass_config={"tir.usmp.enable": True})
input_x = relay.var("x", shape=(10, 10))
input_w0 = relay.var("w0", shape=(10, 10))
input_w1 = relay.var("w1", shape=(10, 10))
marked_input_x = compiler_begin(input_x, "ccompiler")
marked_input_w0 = compiler_begin(input_w0, "ccompiler")
add_x_and_w0 = relay.add(marked_input_x, marked_input_w0)
end_inner_add = compiler_end(add_x_and_w0, "ccompiler")
marked_inner_add = compiler_begin(end_inner_add, "ccompiler")
marked_w1 = compiler_begin(input_w1, "ccompiler")
add_nested_and_w1 = relay.add(marked_inner_add, marked_w1)
end_outer_add = compiler_end(add_nested_and_w1, "ccompiler")
final_add = relay.add(end_inner_add, end_outer_add)
relay_func = relay.Function([input_x, input_w0, input_w1], final_add)
mod = tvm.IRModule()
mod["main"] = relay_func
if merge_compiler_regions:
mod = transform.MergeCompilerRegions()(mod)
mod = transform.PartitionGraph("mod_name")(mod)
mod = transform.InferType()(mod)
x_data = [("x", np.random.rand(10, 10).astype("float32"))]
w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32")) for i in range(2)]
map_inputs = OrderedDict(x_data + w_data)
output_list = generate_ref_data(mod, map_inputs)
compiled_test_mods = compile_models(
AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
MOBILENET_V1_URL = (
"https://storage.googleapis.com/download.tensorflow.org/models/"
+ "mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"mobilenet_v1_1.0_224_quant.tflite",
)
MOBILENET_V2_URL = (
"https://storage.googleapis.com/download.tensorflow.org/models/"
+ "tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz",
"mobilenet_v2_1.0_224_quant.tflite",
)
@pytest.mark.parametrize(
"model_url, usmp_algo, workspace_size, constant_size",
[
(MOBILENET_V1_URL, "greedy_by_size", 4845696, 8468008),
(MOBILENET_V1_URL, "greedy_by_conflicts", 4845696, 8468008),
(MOBILENET_V1_URL, "hill_climb", 3240064, 8468008),
],
)
def test_tflite_model_u1_usecase(model_url, usmp_algo, workspace_size, constant_size):
"""
This checks the memory used by ML models
when using USMP with different algorithms
"""
pytest.importorskip("tflite") |
import tvm.relay.testing.tf as tf_testing
use_unpacked_api = True
interface_api = "c"
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo}
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
mlf_memory_map = mlf._build_function_memory_map(
compiled_test_mods[0].executor_factory.function_metadata
)
assert mlf_memory_map["main"][0]["workspace_size_bytes"] == workspace_size
assert mlf_memory_map["main"][0]["constants_size_bytes"] == constant_size
allocated_pool_info_size = sum(
[
_.allocated_size
for _ in list(
dict(
compiled_test_mods[0].executor_factory.executor_codegen_metadata.pool_inputs
).values()
)
]
)
assert allocated_pool_info_size == workspace_size + constant_size
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
def _get_workspace_size_define_macro(pool_name: str, model_name="default") -> str:
"""This function converts pool names to compiler generated
pool size macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_WORKSPACE_POOL_SIZE"
return prefix + pool_name.upper() + postfix
def _get_constant_size_define_macro(pool_name: str, model_name="default") -> str:
"""This function |
converts pool names to compiler generated
pool size macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_CONSTANT_POOL_SIZE"
return prefix + pool_name.upper() + postfix
def _get_constant_data_define_macro(pool_name: str, model_name="default") -> str:
"""This function converts pool names to compiler generated
pool data macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_CONSTANT_POOL_DATA"
return prefix + pool_name.upper() + postfix
def _add_module_prefix(suffix: str, model_name="default") -> str:
"""A helper function create struct types"""
return "tvmgen_" + model_name + "_" + suffix
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u3_usecase_single_external_pool(model_url, usmp_algo):
"""This checks for inference with USMP using external pool placed in the application"""
pytest.importorskip("tflite") |
import tvm.relay.testing.tf as tf_testing
use_unpacked_api = True
interface_api = "c"
pool_name = "my_memory_pool"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools([WorkspacePoolInfo(pool_name, [target])])
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t {pool_name}[{_get_workspace_size_define_macro(pool_name)}];
""",
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"usmp_algo",
[("greedy_by_size"), ("hill_climb")],
)
def test_tflite_model_u3_usecase_conv2d_var_cons(usmp_algo):
"""This checks for inference using workspace and constant pools placed in the application"""
mod = tvm.parser.fromtext(
"""\
def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(3, 3, 5, 5), int8]) {
%1 = nn.conv2d(
%data,
%weight,
padding=[2, 2],
channels=3,
kernel_size=[5, 5],
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32");
%2 = cast(nn.max_pool2d(%1, pool_size=[3, 3]), dtype="int8");
%3 = nn.conv2d(
%2,
%weight,
padding=[2, 2],
channels=3,
kernel_size=[5, 5],
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32");
%4 = nn.max_pool2d(%3, pool_size=[3, 3]);
%4
}
"""
)
main_func = mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
weight_data = np.random.randint(1, 255, shape_dict["weight"]).astype(type_dict["weight"])
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params = {"weight": weight_data}
inputs = {"data": input_data}
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_mem_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=8500000)
),
]
)
constant_mem_pools = ConstantMemoryPools(
[
ConstantPoolInfo("my_const_pool_1", [target], []),
]
)
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
__attribute__((section(".bss.noinit"), aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
__attribute__((section(".rodata.tvm"), aligned(TVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES)))
static uint8_t my_const_pool_1[{_get_constant_size_define_macro("my_const_pool_1")}] = {{ {_get_constant_data_define_macro("my_const_pool_1")} }};
""",
)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_mem_pools,
constant_memory_pools=constant_mem_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u3_usecase_var_cons_ext_pools(model_url, usmp_algo):
"""This checks for inference using one external workspace and one external constant
pools placed in the application"""
pytest.importorskip("tflite") |
import tvm.relay.testing.tf as tf_testing
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_mem_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=8500000)
),
]
)
constant_mem_pools = ConstantMemoryPools(
[
ConstantPoolInfo("my_const_pool_1", [target], []),
]
)
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
__attribute__((section(".bss.noinit"), aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
__attribute__((section(".rodata.tvm"), aligned(TVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES)))
static uint8_t my_const_pool_1[{_get_constant_size_define_macro("my_const_pool_1")}] = {{ {_get_constant_data_define_macro("my_const_pool_1")} }};
""",
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_mem_pools,
constant_memory_pools=constant_mem_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u3_usecase_two_external_pools(model_url, usmp_algo):
"""This checks for inference using two external pools placed in the application"""
pytest.importorskip("tflite") |
import tvm.relay.testing.tf as tf_testing
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=2500000)
),
WorkspacePoolInfo("my_memory_pool_2", [target]),
]
)
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool_2[{_get_workspace_size_define_macro("my_memory_pool_2")}];
""",
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"model_urls, usmp_algo",
[
((MOBILENET_V1_URL, MOBILENET_V2_URL), "greedy_by_size"),
],
)
def test_two_models_with_a_single_external_pool(model_urls, usmp_algo):
"""This checks for inference using a single large enough common pool"""
pytest.importorskip("tfli |
te") |
import tvm.relay.testing.tf as tf_testing
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools([WorkspacePoolInfo("my_memory_pool", [target])])
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool[MAX({_get_workspace_size_define_macro("my_memory_pool", "mod1")},{_get_workspace_size_define_macro("my_memory_pool", "mod2")})];
""",
)
tflite_model_file1 = tf_testing.get_workload_official(
model_urls[0][0],
model_urls[0][1],
)
mod1, inputs1, params1 = create_relay_module_and_inputs_from_tflite_file(tflite_model_file1)
output_list1 = generate_ref_data(mod1, inputs1, params1)
tflite_model_file2 = tf_testing.get_workload_official(
model_urls[1][0],
model_urls[1][1],
)
mod2, inputs2, params2 = create_relay_module_and_inputs_from_tflite_file(tflite_model_file2)
output_list2 = generate_ref_data(mod2, inputs2, params2)
compiled_test_mods = compile_models(
[
AOTTestModel(
name="mod1", module=mod1, inputs=inputs1, outputs=output_list1, params=params1
),
AOTTestModel(
name="mod2", module=mod2, inputs=inputs2, outputs=output_list2, params=params2
),
],
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u4_usecase_single_external_pool(model_url, usmp_algo):
"""This checks for inference with USMP using external pool placed in the application"""
pytest.importorskip("tflite") |
import tvm.relay.testing.tf as tf_testing
use_unpacked_api = True
interface_api = "c"
pool_name = "my_memory_pool"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools([WorkspacePoolInfo(pool_name, [target])])
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
input_name, input_data = list(inputs.items())[0]
input_size_bytes = input_data.size * input_data.itemsize
test_runner = AOTTestRunner(
pass_config={
"tir.usmp.enable": True,
"tir.usmp.algorithm": usmp_algo,
"tir.usmp.use_workspace_io": True,
},
prologue=f"""
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t {pool_name}[{_get_workspace_size_define_macro(pool_name)}];
struct {_add_module_prefix("workspace_pools")} {_add_module_prefix("workspace_pools")} = {{
.{pool_name} = {pool_name}
}};
struct {_add_module_prefix("inputs")} {_add_module_prefix("inputs")} = {_add_module_prefix("map_inputs")}(&{_add_module_prefix("workspace_pools")});
memcpy({_add_module_prefix("inputs")}.{input_name}, tvmgen_default_input_data_input, {input_size_bytes});
struct {_add_module_prefix("outputs")} {_add_module_prefix("outputs")} = {_add_module_prefix("map_outputs")}(&{_add_module_prefix("workspace_pools")});
""",
)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
use_workspace_io=True,
)
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u4_usecase_two_external_pools(model_url, usmp_algo):
"""This checks for inference with USMP using external pool placed in the application"""
pytest.importorskip("tflite") |
import tvm.relay.testing.tf as tf_testing
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=2500000)
),
WorkspacePoolInfo("my_memory_pool_2", [target]),
]
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
input_name, input_data = list(inputs.items())[0]
input_size_bytes = input_data.size * input_data.itemsize
test_runner = AOTTestRunner(
pass_config={
"tir.usmp.enable": True,
"tir.usmp.algorithm": usmp_algo,
"tir.usmp.use_workspace_io": True,
},
prologue=f"""
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool_2[{_get_workspace_size_define_macro("my_memory_pool_2")}];
struct {_add_module_prefix("workspace_pools")} {_add_module_prefix("workspace_pools")} = {{
.my_memory_pool_1 = my_memory_pool_1,
.my_memory_pool_2 = my_memory_pool_2,
}};
struct {_add_module_prefix("inputs")} {_add_module_prefix("inputs")} = {_add_module_prefix("map_inputs")}(&{_add_module_prefix("workspace_pools")});
memcpy({_add_module_prefix("inputs")}.{input_name}, tvmgen_default_input_data_input, {input_size_bytes});
struct {_add_module_prefix("outputs")} {_add_module_prefix("outputs")} = {_add_module_prefix("map_outputs")}(&{_add_module_prefix("workspace_pools")});
""",
)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
use_workspace_io=True,
)
def test_incompatible_interface_api_errors():
"""Ensures an error is thrown if not using the C interface API"""
mod, params = tvm.relay.testing.synthetic.get_workload()
target = "c"
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"interface-api": "packed",
},
)
with pytest.raises(
tvm.TVMError,
match=re.escape(
"tir.usmp.use_workspace_io option is only compatible with interface_api c.\n"
"Please use interface_api c to be able to enable tir.usmp.use_workspace_io"
),
):
with tvm.transform.PassContext(
opt_level=3,
config={"tir.usmp.enable": True, "tir.usmp.use_workspace_io": True},
):
tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm.ir import assert_structural_equal
from tvm.relay.backend.aot import AOTLowerMain, CallType
from tvm.script import tir as T
def _make_const(dtype, shape):
return tvm.relay.const(np.zeros(shape).astype(dtype))
def _make_consts(dtype, shapes):
return [_make_const(dtype, shape) for shape in shapes]
def _plan_devices(mod):
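    """Runs PlanDevices and InferType so the module is ready for AOT lowering."""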
host_target = tvm.target.Target("llvm")
prim_target = tvm.target.Target("llvm", host=host_target)
ctxt = tvm.transform.PassContext()
config = tvm.target.make_compilation_config(ctxt, prim_target)
mod = tvm.relay.transform.PlanDevices(config)(mod)
mod = tvm.relay.transform.InferType()(mod)
return mod, config
def _assert_lowered_main(mod, main_func, call_type, print_script=False):
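    """Lowers the module with AOTLowerMain and checks the result against main_func."""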
mod, config = _plan_devices(mod)
mod = AOTLowerMain("test_mod", config, call_type)(mod)
if print_script:
print(mod["__tvm_main__"].script())
assert_structural_equal(mod["__tvm_main__"], main_func)
def test_single_call_cpacked():
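    """Tests lowering a single call with the CPacked calling convention."""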
mod = tvm.parser.parse(
"""
def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
)
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
_assert_lowered_main(mod, func, CallType.CPacked)
def test_single_call_packed():
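    """Tests lowering a single call with the Packed calling convention."""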
mod = tvm.parser.parse(
"""
def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }
        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
)
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
T.evaluate(T.tvm_check_return(0, -1, T.tvm_call_packed("test_fused_add", a_buffer.data, output_buffer.data, dtype="int32"), dtype="int32"))
_assert_lowered_main(mod, func, CallType.Packed)
def test_single_call_unpacked():
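    """Tests lowering a single call with the Unpacked calling convention."""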
mod = tvm.parser.parse(
"""
def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
)
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("test_fused_add", a_buffer.data, output_buffer.data, dtype="int32"), dtype="int32"))
_assert_lowered_main(mod, func, CallType.Unpacked)
def test_constant():
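    """Tests lowering a call that takes a constant argument."""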
mod = tvm.parser.parse(
"""
def @test_fused_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) { %x }
        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a, meta[relay.Constant][0]) /* ty=(Tensor[(5, 7), float32], Tensor[(5, 7), float32]) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
init_meta_table={"relay.Constant": _make_consts("float32", [(5, 7)])},
)
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
T.func_attr({"runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "global_symbol": "test_mod___tvm_main__", "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
_assert_lowered_main(mod, func, CallType.CPacked)
@pytest.mark.xfail()
def test_copy_to_output():
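    """Tests lowering when main returns an input directly, requiring a copy to the output buffer (currently expected to fail)."""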
mod = tvm.parser.parse(
"""
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%a
}
""",
)
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
tmp_read = T.buffer_var("uint8", "")
tmp_read_1 = T.buffer_decl([T.uint64(140)], dtype="uint8", data=tmp_read)
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
tmp_write: T.Ptr[T.uint8] = output_buffer.data
tmp_write_1 = T.buffer_decl([T.uint64(140)], dtype="uint8", data=tmp_write)
        for i in T.serial(140):
            tmp_write_1[i] = T.let(tmp_read, a_buffer.data, tmp_read_1[i])
_assert_lowered_main(mod, func, CallType.CPacked)
def test_two_calls():
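    """Tests lowering two sequential calls with an intermediate workspace buffer."""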
mod = tvm.parser.parse(
"""
def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
%1 = call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */;
%2 = (%1,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %2) /* ty=Tensor[(5, 7), float32] */
}
""",
)
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
sid_2 = T.allocate([140], "int8", "global.workspace")
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add", sid_2, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
_assert_lowered_main(mod, func, CallType.CPacked)
def test_tuple_output():
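    """Tests lowering a call that returns a tuple of outputs."""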
mod = tvm.parser.parse(
"""
def @test_fused_add(%x: Tensor[(5, 7), float32]) { (%x, %x) }
def @main(%a: Tensor[(5, 7), float32]) -> (Tensor[(5, 7), float32], Tensor[(5, 7), float32]) {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %0) /* ty=(Tensor[(5, 7), float32], Tensor[(5, 7), float32]) */
}
""",
)
@T.prim_func
def func(a: T.handle, output0: T.handle, output1: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_functio |
n": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output0, output1], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output0_buffer = T.match_buffer(output0, [5, 7], dtype="float32", align=16)
output1_buffer = T.match_buffer(output1, [5, 7], dtype="float32", align=16)
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, output0_buffer.data, output1_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
_assert_lowered_main(mod, func, CallType.CPacked)
def test_tuple_intermediate():
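    """Tests lowering when a tuple produced by one call feeds another call."""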
mod = tvm.parser.parse(
"""
def @test_fused_add_0(%x: Tensor[(5, 7), float32]) -> (Tensor[(5, 7), float32], Tensor[(5, 7), float32]) { (%x, %x) }
def @test_fused_add_1(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,);
%1 = call_lowered(@test_fused_add_0, %0);
%2 = (%1.0, %1.1);
call_lowered(@test_fused_add_1, %2)
}
""",
)
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    _assert_lowered_main(mod, func, CallType.CPacked)
def test_multi_input():
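    """Tests lowering a call with multiple inputs."""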
mod = tvm.parser.parse(
"""
def @test_fused_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a, %b) /* ty=(Tensor[(5, 7), float32], Tensor[(5, 7), float32]) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
)
@T.prim_func
def func(a: T.handle, b: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a, b], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
b_buffer = T.match_buffer(b, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, b_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
_assert_lowered_main(mod, func, CallType.CPacked)
def test_let_binding():
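    """Tests lowering a call whose result is bound with a let expression."""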
mod = tvm.parser.parse(
"""
def @test_fused_add(%x: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,);
let %v1 = call_lowered(@test_fused_add, %0);
%v1
}
""",
)
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_bu |
ffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
_assert_lowered_main(mod, func, CallType.CPacked)
def test_let_binding_branch():
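    """Tests lowering a chain of let-bound calls where one result is used twice."""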
mod = tvm.parser.parse(
"""
def @test_fused_add_0(%x: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @test_fused_add_1(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,);
let %v0 = call_lowered(@test_fused_add_0, %0);
%1 = (%v0,);
let %v1 = call_lowered(@test_fused_add_0, %1);
%2 = (%v1,);
let %v2 = call_lowered(@test_fused_add_0, %2);
%3 = (%v1, %v2);
let %v3 = call_lowered(@test_fused_add_1, %3);
%v3
}
""",
)
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
_assert_lowered_main(mod, func, CallType.CPacked)
def test_device_hooks():
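    """Tests that device hook calls (Activate/Open/Close/Deactivate) are inserted around lowered calls."""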
mod = tvm.parser.parse(
"""
def @test_fused_add(%x: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,);
%1 = call_lowered(@test_fused_add, %0);
%2 = (%1,);
call_lowered(@test_fused_add, %2)
}
""",
)
@T.prim_func
def func(a: T.handle, output: T.handle, device_context_example_target_hook: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": ["example_target_hook"]})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookActivate", device_context_example_target_hook, dtype="int32"), dtype="int32"))
with T.allocate([140], "int8", "global.workspace") as sid_2:
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookOpen", device_context_example_target_hook, dtype="int32"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, sid_2, device_context_example_target_hook, dtype="int32"))
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookClose", device_context_example_target_hook, dtype="int32"), dtype="int32"))
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookOpen", device_context_example_target_hook, dtype="int32"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add", sid_2, output_buffer.data, device_context_example_target_hook, dtype="int32"))
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookClose", device_context_example_target_hook, dtype="int32"), dtype="int32"))
        T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookDeactivate", device_context_example_target_hook, dtype="int32"), dtype="int32"))
device_contexts = {}
for gv in mod.get_global_vars():
device_contexts[gv] = "example_target_hook"
mod = mod.with_attr("device_contexts", device_contexts)
_assert_lowered_main(mod, func, CallType.CPacked)
if __name__ == "__main__":
    tvm.testing.main()
import tvm
import tvm.testing
import logging
logging.basicConfig()
logger = logging.getLogger("test_pass_lower_te")
logger.setLevel(logging.INFO)
LowerTE = tvm._ffi.get_global_func("relay.tec.LowerTE")
def transform(mod):
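    """Plans devices, lowers primitive functions with LowerTE, and returns the result."""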
logger.info("Starting module:\n%s", mod)
host_target = tvm.target.Target("llvm")
prim_target = tvm.target.Target("llvm", host=host_target)
ctxt = tvm.transform.PassContext()
config = tvm.target.make_compilation_config(ctxt, prim_target)
mod = tvm.relay.transform.PlanDevices(config)(mod)
mod = tvm.relay.transform.InferType()(mod)
mod = LowerTE("test", config)(mod)
mod = tvm.relay.transform.InferType()(mod)
logger.info("After LowerTE:\n%s", mod)
return mod
def test_lower_primitive():
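    """Tests that a Primitive function is lowered to a TIR PrimFunc and invoked via call_lowered."""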
input_mod = tvm.parser.parse(
"""
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = fn(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Primitive=1) -> Tensor[(5, 7), float32] {
add(%x, %y)
};
%0(%a, %a)
}
""",
"from_string",
None,
None,
)
actual_mod = transform(input_mod)
main = actual_mod["main"]
call = main.body
assert call.op.name == "call_lowered"
assert len(call.args) == 2
assert call.args[0].name_hint == "test_fused_add"
assert len(call.args[1].fields) == 2
assert call.args[1].fields[0].name_hint == "a"
assert call.args[1].fields[1].name_hint == "a"
assert call.attrs.metadata["relay_attrs"].Primitive == 1
assert len(call.attrs.metadata["all_prim_fn_vars"]) == 1
assert call.attrs.metadata["all_prim_fn_vars"][0].name_hint == "test_fused_add"
test_fused_add = actual_mod["test_fused_add"]
assert isinstance(test_fused_add, tvm.tir.PrimFunc)
def test_lower_compiler():
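    """Tests that a Compiler-annotated function is outlined as an extern call rather than lowered to TIR."""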
@tvm._ffi.register_func("relay.ext.test_pass_lower_te")
def relay_ext_test_pass_lower_te(func):
return None
input_mod = tvm.parser.parse(
"""
        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = fn(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Primitive=1, Compiler="test_pass_lower_te", global_symbol="test_add") -> Tensor[(5, 7), float32] {
add(%x, %y)
};
%0(%a, %a)
}
""",
"from_string",
None,
None,
)
actual_mod = transform(input_mod)
main = actual_mod["main"]
call = main.body
assert call.op.name == "call_lowered"
assert len(call.args) == 2
assert call.args[0].name_hint == "test_add"
assert len(call.args[1].fields) == 2
assert call.args[1].fields[0].name_hint == "a"
assert call.args[1].fields[1].name_hint == "a"
assert call.attrs.metadata["relay_attrs"].Primitive == 1
assert call.attrs.metadata["relay_attrs"].Compiler == "test_pass_lower_te"
assert call.attrs.metadata["relay_attrs"].global_symbol == "test_add"
assert len(call.attrs.metadata["all_prim_fn_vars"]) == 0
test_add = actual_mod["test_add"]
assert isinstance(test_add, tvm.relay.Function)
assert test_add.attrs["Extern"] == 1
def test_lower_extern():
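    """Tests that an Extern function is left as a Relay function and called via call_lowered."""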
input_mod = tvm.parser.parse(
"""
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
@my_add(%a, %a)
}
def @my_add(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Extern=1) -> Tensor[(5, 7), float32] {
add(%x, %y)
}
""",
"from_string",
None,
None,
)
actual_mod = transform(input_mod)
main = actual_mod["main"]
call = main.body
assert call.op.name == "call_lowered"
assert len(call.args) == 2
assert call.args[0].name_hint == "my_add"
assert len(call.args[1].fields) == 2
assert call.args[1].fields[0].name_hint == "a"
assert call.args[1].fields[1].name_hint == "a"
assert call.attrs.metadata["relay_attrs"].Extern = |
= 1
assert len(call.attrs.metadata["all_prim_fn_vars"]) == 0
test_add = actual_mod["my_add"]
assert isinstance(test_add, tvm.relay.Function)
assert test_add.attrs["Extern"] == 1
def test_lower_extern_with_dynamic_shape():
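    """Tests that lowering an extern call with a dynamic result shape also generates a shape function."""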
input_mod = tvm.parser.parse(
"""
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(?, ?), float32] {
@my_dyn(%a, %a)
}
def @my_dyn(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Extern=1) -> Tensor[(?, ?), float32] {
add(%x, %y)
}
""",
"from_string",
None,
None,
)
actual_mod = transform(input_mod)
main = actual_mod["main"]
call = main.body
assert call.op.name == "call_lowered"
assert len(call.args) == 2
assert call.args[0].name_hint == "my_dyn"
assert len(call.args[1].fields) == 2
assert call.args[1].fields[0].name_hint == "a"
assert call.args[1].fields[1].name_hint == "a"
assert call.attrs.metadata["prim_shape_fn_var"].name_hint == "test_shape_func_add"
assert call.attrs.metadata["relay_attrs"].Extern == 1
assert len(call.attrs.metadata["prim_shape_fn_states"]) == 2
assert call.attrs.metadata["prim_shape_fn_states"][0] == 2
assert call.attrs.metadata["prim_shape_fn_states"][1] == 2
assert call.attrs.metadata["prim_shape_fn_num_inputs"] == 2
assert len(call.attrs.metadata["all_prim_shape_fn_vars"]) == 1
assert call.attrs.metadata["all_prim_shape_fn_vars"][0].name_hint == "test_shape_func_add"
assert call.attrs.metadata["prim_shape_fn_num_outputs"] == 1
assert len(call.attrs.metadata["all_prim_fn_vars"]) == 0
my_dyn = actual_mod["my_dyn"]
assert isinstance(my_dyn, tvm.relay.Function)
assert my_dyn.attrs["Extern"] == 1
shape_func_add = actual_mod["test_shape_func_add"]
assert isinstance(shape_func_add, tvm.tir.PrimFunc)
if __name__ == "__main__":
    tvm.testing.main()
import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprMutator
import tvm.testing
from tvm.script import tir as T
HOST_DEVICE = tvm.device("cpu")
HOST_TARGET = tvm.target.Target("llvm")
CPU_DEVICE = tvm.device("cpu")
CPU_TARGET = tvm.target.Target("llvm").with_host(HOST_TARGET)
CPU = tvm.target.VirtualDevice(CPU_DEVICE, CPU_TARGET)
RemoveStandaloneReshapes = tvm._ffi.get_global_func("relay._transform.RemoveStandaloneReshapes")
class MarkReshapeOnlyMutator(ExprMutator):
"""A pass for marking call_lowered as ReshapeOnly where reshapes exist unfused"""
def __init__(self):
ExprMutator.__init__(self)
def visit_call(self, call):
if isinstance(call.args[0], tvm.ir.GlobalVar) and "reshape" in call.args[0].name_hint:
dict_attrs = tvm.ir.make_node("DictAttrs", **{"relay.reshape_only": 1})
attrs = tvm.ir.make_node(
"relay.attrs.CallLoweredAttrs", **{"metadata": {"relay_attrs": dict_attrs}}
)
return relay.Call(call.op, call.args, attrs)
return super().visit_call(call)
def test_first_reshape():
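    """Tests that a reshape whose result is the main output is preserved."""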
mod = tvm.ir.IRModule()
@T.prim_func
def reshape_primfunc(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j in T.grid(128, 128):
D[i, j] = A[i, j]
metatable = {"VirtualDevice": [CPU]}
reshape_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
reshape_gv = relay.GlobalVar("reshape", type_annot=reshape_ty)
mod[reshape_gv] = reshape_primfunc
mod = tvm.parser.parse(
"""
def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
virtual_device=meta[VirtualDevice][0]) {
%1 = call_lowered(@reshape, (%x,) );
let %x_14: Tensor[(128, 128), float32] = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%x_14
}
""",
"from_string",
mod,
metatable,
)
mod["main"] = MarkReshapeOnlyMutator().visit(mod["main"])
mod = RemoveStandaloneReshapes()(mod)
reshapes_present = any(["reshape" in gv.name_hint for gv in mod.get_global_vars()])
    assert reshapes_present, "Reshape should not have been removed."
return
def test_last_reshape():
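    """Tests that a trailing standalone reshape marked reshape-only is removed."""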
    mod = tvm.ir.IRModule()
@T.prim_func
def mul_primfunc(a: T.handle, b: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
D[vi, vj] = A[vi, vk] * B[vj, vk]
@T.prim_func
def reshape_primfunc(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j in T.grid(128, 128):
D[i, j] = A[i, j]
metatable = {"VirtualDevice": [CPU]}
mul_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
relay.TensorType((128, 128), "float32"),
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
mul_gv = relay.GlobalVar("multiply", type_annot=mul_ty)
mod[mul_gv] = mul_primfunc
reshape_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
reshape_gv = relay.GlobalVar("reshape", type_annot=reshape_ty)
mod[reshape_gv] = reshape_primfunc
mod = tvm.parser.parse(
"""
def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
%y {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
%z {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
virtual_device=meta[VirtualDevice][0]) {
%0 = call_lowered(@multiply, (%x, %y, %z));
let %x_12: Tensor[(128, 128), float32] = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%1 = call_lowered(@reshape, (%x_12,) );
        let %x_14: Tensor[(128, 128), float32] = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%x_14
}
""",
"from_string",
mod,
metatable,
)
mod["main"] = MarkReshapeOnlyMutator().visit(mod["main"])
mod = RemoveStandaloneReshapes()(mod)
reshapes_present = any(["reshape" in gv.name_hint for gv in mod.get_global_vars()])
assert not reshapes_present, "Reshape should have been removed."
return
def test_fused_reshape():
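    """Tests that a fused reshape (not marked reshape-only) is preserved."""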
mod = tvm.ir.IRModule()
@T.prim_func
def mul_primfunc(a: T.handle, b: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
D[vi, vj] = A[vi, vk] * B[vj, vk]
@T.prim_func
def fused_reshape_primfunc(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j in T.grid(128, 128):
D[i, j] = A[i, j]
metatable = {"VirtualDevice": [CPU]}
mul_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
relay.TensorType((128, 128), "float32"),
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
mul_gv = relay.GlobalVar("multiply", type_annot=mul_ty)
mod[mul_gv] = mul_primfunc
reshape_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
reshape_gv = relay.GlobalVar("fused_reshape", type_annot=reshape_ty)
mod[reshape_gv] = fused_reshape_primfunc
mod = tvm.parser.parse(
"""
def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
                  %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
%z {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
virtual_device=meta[VirtualDevice][0]) {
%0 = call_lowered(@multiply, (%x, %y, %z));
let %x_12: Tensor[(128, 128), float32] = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%1 = call_lowered(@fused_reshape, (%x_12,) );
let %x_14: Tensor[(128, 128), float32] = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%x_14
}
""",
"from_string",
mod,
metatable,
)
mod = RemoveStandaloneReshapes()(mod)
reshapes_present = any(["reshape" in gv.name_hint for gv in mod.get_global_vars()])
    assert reshapes_present, "Fused reshape should not have been removed."
return
if __name__ == "__main__":
    tvm.testing.main()
"""Benchmarking Relay VM using models from MXNet."""
import numpy as np