import numpy as np
from pytest import skip

import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, te, topi
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm.contrib import nnpack
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
def verify_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
devices,
dilation=1,
add_bias=False,
add_relu=False,
):
"""Verify conv2d nchw workload."""
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation)
)
in_height = in_width = in_size
    placeholder_a = te.placeholder((batch, in_channel, in_height, in_width), name="A")
placeholder_w = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
    a_shape = get_const_tuple(placeholder_a.shape)
w_shape = get_const_tuple(placeholder_w.shape)
bias_shape = get_const_tuple(bias.shape)
    dtype = placeholder_a.dtype
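    # pickle_memoize caches the reference data on disk under the key below, so
    # repeated test runs reuse it instead of recomputing the numpy reference.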
@memoize("topi.tests.test_topi_conv2d_nchw.verify_conv2d_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
        if add_bias:
            c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skipping %s because it is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
result_c = topi.nn.conv2d(
                placeholder_a,
placeholder_w,
stride,
padding,
dilation,
                data_layout="NCHW",
out_dtype=dtype,
)
if add_bias:
result_c = topi.add(result_c, bias)
if add_relu:
result_c = topi.nn.relu(result_c)
schedule = topi.generic.schedule_conv2d_nchw([result_c])
buff_a = tvm.nd.array(a_np, dev)
buff_w = tvm.nd.array(w_np, dev)
buff_b = tvm.nd.array(b_np, dev)
buff_c = tvm.nd.array(np.zeros(get_const_tuple(result_c.shape), dtype=result_c.dtype), dev)
if add_bias:
func = tvm.build(
schedule,
                [placeholder_a, placeholder_w, bias, result_c],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation),
)
func(buff_a, buff_w, buff_b, buff_c)
else:
func = tvm.build(
schedule,
                [placeholder_a, placeholder_w, result_c],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation),
)
func(buff_a, buff_w, buff_c)
tvm.testing.assert_allclose(buff_c.numpy(), c_np, rtol=1e-4)
for device in devices:
        check_device(device)
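# When no tuned schedule exists in the autotvm cache, this fallback context
# answers every query with a config pinned to the nnpack winograd template.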
class WinogradFallback(autotvm.FallbackContext):
"""Winograd fallbacks."""
def _query_inside(self, target, workload):
key = (target, workload)
if key in self.memory:
return self.memory[key]
cfg = FallbackConfigEntity()
cfg.template_key = "winograd_nnpack_fp32"
self.memory[key] = cfg
return cfg
def test_conv2d_nchw():
"""Verify conv2d nchw winograd works."""
if not tvm.get_global_func(
"tvm.contrib.nnpack.convolution_inference_without_weight_transform", True
):
skip("extern function is not available")
if not nnpack.is_available():
skip("nnpack is not available")
devices = ["llvm -device=arm_cpu"]
autotvm.GLOBAL_SCOPE.silent = True
with WinogradFallback():
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 128, 28, 128, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 256, 14, 256, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 512, 7, 512, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 3, 192, 12, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 4, 192, 12, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 12, 96, 24, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 24, 48, 48, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 48, 24, 96, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 96, 12, 180, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 180, 6, 220, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 220, 6, 180, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 180, 12, 96, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 96, 24, 48, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 48, 48, 24, 3, 1, 1, add_bias=True, devices=devices)
        verify_conv2d_nchw(1, 24, 96, 12, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 12, 192, 1, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_relu=True, devices=devices)
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_relu=True, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 3, 3, 3, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 13, 71, 59, 3, 1, 1, devices=devices)
autotvm.GLOBAL_SCOPE.silent = False
if __name__ == "__main__":
    import pytest

    pytest.main([__file__])
from collections import namedtuple
import logging
import os

import mxnet as mx
from mxnet import gluon

import tvm
import tvm.testing
import tvm.contrib.graph_executor
from tvm import relay
from tvm.relay import quantize as qtz
logging.basicConfig(level=logging.INFO)
Config = namedtuple(
"Config",
[
"model",
"nbit_input",
"dtype_input",
"nbit_output",
"dtype_output",
"global_scale",
"expected_acc",
],
)
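# One quantization experiment: which gluon model to load, the bit widths and
# dtypes for inputs/outputs, the quantizer's global scale, and the minimum
# top-1 accuracy the quantized model is expected to reach.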
def get_val_data(model_name, rec_val, batch_size, num_workers=4):
rec_val = os.path.expanduser(rec_val)
mean_rgb = [123.68, 116.779, 103.939]
std_rgb = [58.393, 57.12, 57.375]
def batch_fn(batch, ctx):
data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
return data, label
img_size = 299 if model_name == "inceptionv3" else 224
val_data = mx.io.ImageRecordIter(
path_imgrec=rec_val,
preprocess_threads=num_workers,
shuffle=False,
batch_size=batch_size,
resize=256,
data_shape=(3, img_size, img_size),
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
std_r=std_rgb[0],
std_g=std_rgb[1],
std_b=std_rgb[2],
)
return val_data, batch_fn
def get_model(model_name, batch_size, qconfig, original=False):
gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)
img_size = 299 if model_name == "inceptionv3" else 224
data_shape = (batch_size, 3, img_size, img_size)
mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
logging.debug("original")
logging.debug(mod.astext(show_meta_data=False))
if original:
return mod, params
with qconfig:
logging.debug("current quantize config")
logging.debug(qtz.current_qconfig())
qfunc = qtz.quantize(mod, params)
logging.debug("after quantize")
logging.debug(qfunc.astext(show_meta_data=False))
return qfunc, params
def eval_acc(
model, params, dataset, batch_fn, target=tvm.target.cuda(), device=tvm.cuda(), log_interval=500
):
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(model, target, params=params)
m = tvm.contrib.graph_executor.GraphModule(lib["default"](device))
dataset.reset()
batch_size = dataset.batch_size
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
acc_top1.reset()
acc_top5.reset()
for i, batch in enumerate(dataset):
data, label = batch_fn(batch, [mx.cpu(0)])
m.set_input("data", tvm.nd.array(data[0].asnumpy()))
m.run()
out_arr = m.get_output(0)
acc_top1.update(label, [mx.nd.array(out_arr.numpy())])
acc_top5.update(label, [mx.nd.array(out_arr.numpy())])
        if (i + 1) % log_interval == 0:
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
nsamples = (i + 1) * batch_size
logging.info("[%d samples] validation: acc-top1=%f acc-top5=%f", nsamples, top1, top5)
logging.info("[final] validation: acc-top1=%f acc-top5=%f", top1, top5)
return top1
@tvm.testing.requires_gpu
def test_quantize_acc(cfg, rec_val):
qconfig = qtz.qconfig(
skip_conv_layers=[0],
nbit_input=cfg.nbit_input,
nbit_weight=cfg.nbit_input,
global_scale=cfg.global_scale,
dtype_input=cfg.dtype_input,
dtype_weight=cfg.dtype_input,
dtype_activation=cfg.dtype_output,
debug_enabled_ops=None,
)
batch_size = 1
model, params = get_model(cfg.model, batch_size, qconfig)
val_data, batch_fn = get_val_data(cfg.model, rec_val=rec_val, batch_size=batch_size)
acc = eval_acc(model, params, val_data, batch_fn)
assert acc > cfg.expected_acc
return acc
if __name__ == "__main__":
rec_val = "/scratch/tqchen/imagenet/val.rec"
results = []
configs = [
Config(
"mobilenetv2_1.0",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=4.0,
            expected_acc=0.666,
),
Config(
"mobilenetv2_1.0",
nbit_input=8,
dtype_input="int8",
nbit_output=16,
dtype_output="int16",
global_scale=4.0,
expected_acc=0.666,
),
Config(
"resnet18_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=16,
dtype_output="int16",
global_scale=8.0,
expected_acc=0.692,
),
Config(
"resnet18_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.692,
),
Config(
"resnet34_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.733,
),
Config(
"resnet50_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.747,
),
Config(
"resnet101_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.756,
),
]
for config in configs:
acc = test_quantize_acc(config, rec_val)
results.append((config, acc))
for res in results:
        print(res)
import os
import sys
import logging

import pytest

pytest.importorskip("onnx")
import onnx

import tvm
import tvm.testing
from tvm import relay
from tvm.relay import quantize as qtz
from test_quantization_accuracy import Config, get_val_data, eval_acc
logging.basicConfig(level=logging.INFO)
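# calibrate_dataset yields dicts mapping input names to calibration batches;
# qtz.quantize consumes them when a calibration mode such as "percentile"
# needs real activation statistics.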
def calibrate_dataset(model_name, rec_val, batch_size, calibration_samples):
val_data, _ = get_val_data(model_name, rec_val=rec_val, batch_size=batch_size)
val_data.reset()
for i, batch in enumerate(val_data):
if i * batch_size >= calibration_samples:
break
data = batch.data[0].asnumpy()
yield {"data": data}
def download_file(url_base, file_name):
    if not os.path.exists(file_name) or not os.path.isfile(file_name):
        import urllib.request as urllib2
url = "{}/{}".format(url_base, file_name)
try:
print("download from {}".format(url))
if sys.version_info >= (3,):
urllib2.urlretrieve(url, file_name)
else:
f = urllib2.urlopen(url)
data = f.read()
with open(file_name, "wb") as code:
code.write(data)
except Exception as err:
if os.path.exists(file_name):
os.remove(file_name)
raise Exception("download {} failed due to {}!".format(file_name, repr(err)))
def get_onnx_model(model_name, batch_size, qconfig, original=False, dataset=None):
assert model_name == "vit32", "Only support vit32 model!"
base = "https:
logfile = "gtx1660_vit_B32_224.log"
onnx_path = "vit_B32_224.onnx"
download_file(base, logfile)
download_file(base, onnx_path)
onnx_graph = onnx.load(open(onnx_path, "rb"))
data_shape = (batch_size, 3, 224, 224)
mod, params = relay.frontend.from_onnx(onnx_graph, {"data": data_shape})
with tvm.transform.PassContext(opt_level=3):
qfunc = relay.quantize.prerequisite_optimize(mod, params=params)
logging.debug("original")
logging.debug(qfunc.astext(show_meta_data=False))
if original:
return qfunc, params, logfile
with qconfig:
logging.debug("current quantize config")
logging.debug(qtz.current_qconfig())
if dataset is not None:
with tvm.target.cuda():
with tvm.autotvm.apply_history_best(logfile):
qfunc = qtz.quantize(qfunc, params, dataset=dataset)
else:
qfunc = qtz.quantize(qfunc, params)
logging.debug("after quantize")
logging.debug(qfunc.astext(show_meta_data=False))
return qfunc, params, logfile
@tvm.testing.requires_gpu
def test_onnx_quantize_acc(cfg, rec_val, batch_size=1, original=False):
qconfig = qtz.qconfig(
skip_conv_layers=[0],
        skip_dense_layer=False,
nbit_input=cfg.nbit_input,
nbit_weight=cfg.nbit_input,
dtype_input=cfg.dtype_input,
dtype_weight=cfg.dtype_input,
dtype_activation=cfg.dtype_output,
debug_enabled_ops=None,
calibrate_mode="percentile",
calibrate_chunk_by=8,
)
dataset = list(calibrate_dataset(cfg.model, rec_val, batch_size, 64))
model, params, logfile = get_onnx_model(
cfg.model, batch_size, qconfig, original=original, dataset=dataset
)
val_data, batch_fn = get_val_data(cfg.model, rec_val=rec_val, batch_size=batch_size)
with tvm.autotvm.apply_history_best(logfile):
acc = eval_acc(model, params, val_data, batch_fn, log_interval=1000)
assert acc > cfg.expected_acc
return acc
if __name__ == "__main__":
rec_val = "/scratch/tqchen/imagenet/val.rec"
configs = [
Config(
"vit32",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.727,
),
]
for config in configs:
acc = test_onnx_quantize_acc(config, rec_val, batch_size=1, original=True)
print("{}-float32: {}".format(config.model, acc))
acc = test_onnx_quantize_acc(config, rec_val, batch_size=1, original=False)
print("{}-int8: {}".format(config.model, acc)) |
import numpy as np

import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.runtime.ndarray import array
from tvm.relay.backend import Executor
from tvm.relay.backend.aot import CreateExecutorMetadata
from tvm.relay import TensorType
from tvm.tir.usmp.utils import PoolAllocation
from tvm.ir.memory_pools import AllocatedPoolInfo, ConstantPoolInfo, WorkspacePoolInfo, ConstantInfo
def _check_executor_metadata(executor_metadata, expected_metadata):
assert list(executor_metadata.inputs) == expected_metadata["inputs"]
assert list(executor_metadata.input_tensor_types) == expected_metadata["input_tensor_types"]
assert list(executor_metadata.outputs) == expected_metadata["outputs"]
assert list(executor_metadata.output_tensor_types) == expected_metadata["output_tensor_types"]
assert list(executor_metadata.pools) == expected_metadata["pools"]
assert executor_metadata.devices == expected_metadata["devices"]
assert executor_metadata.executor == expected_metadata["executor"]
assert executor_metadata.mod_name == expected_metadata["mod_name"]
assert executor_metadata.interface_api == expected_metadata["interface_api"]
assert executor_metadata.unpacked_api == expected_metadata["unpacked_api"]
assert executor_metadata.workspace_alignment == expected_metadata["workspace_alignment"]
assert executor_metadata.constant_alignment == expected_metadata["constant_alignment"]
assert set(executor_metadata.pool_inputs.keys()) == set(expected_metadata["pool_inputs"].keys())
assert set(executor_metadata.io_pool_allocations.keys()) == set(
expected_metadata["io_pool_allocations"].keys()
)
def test_create_executor_metadata_single_func():
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(
a: T.handle, output: T.handle, workspace: T.Ptr[T.uint8], constants: T.Ptr[T.uint8]
) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind": "llvm", "tag": "", "keys": ["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": ["test_device"]})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
target = Module["__tvm_main__"].attrs["target"]
executor = Executor("aot", {"interface-api": "c"})
workspace_pool_info = AllocatedPoolInfo(
WorkspacePoolInfo("sram", [target]),
256,
3,
)
constant_pool_info = AllocatedPoolInfo(
ConstantPoolInfo(
"flash",
[target],
[ConstantInfo("a", 0, array(np.array([0])))],
),
512,
2,
)
io_pool_allocations = {
"a": PoolAllocation(WorkspacePoolInfo("sram", [target]), 0),
"output": PoolAllocation(WorkspacePoolInfo("sram", [target]), 0),
}
mod = Module.with_attr("io_tensor_pool_allocations", io_pool_allocations)
mod["__tvm_main__"] = mod["__tvm_main__"].with_attr(
"pool_args",
[
constant_pool_info,
workspace_pool_info,
],
)
f = mod["__tvm_main__"]
expected_metadata = {
"inputs": [f.params[0]],
"input_tensor_types": [TensorType((5, 7), "float32")],
"outputs": ["output"],
"output_tensor_types": [TensorType((5, 7), "float32")],
" |
pools": f.params[2:],
"devices": f.attrs["devices"],
"executor": "aot",
"mod_name": "test_mod",
"interface_api": "c",
"unpacked_api": False,
"workspace_alignment": 16,
"constant_alignment": 1,
"pool_inputs": {
f.params[2]: workspace_pool_info,
f.params[3]: constant_pool_info,
},
"io_pool_allocations": io_pool_allocations,
}
executor_metadata = CreateExecutorMetadata(mod, "test_mod", executor, 16, 1)
_check_executor_metadata(executor_metadata, expected_metadata)
def test_create_executor_metadata_no_usmp():
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(
a: T.handle, output: T.handle
) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind": "llvm", "tag": "", "keys": ["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": ["test_device"]})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
executor = Executor("aot", {"interface-api": "c"})
mod = Module
f = mod["__tvm_main__"]
expected_metadata = {
"inputs": [f.params[0]],
"input_tensor_types": [TensorType((5, 7), "float32")],
"outputs": ["output"],
"output_tensor_types": [TensorType((5, 7), "float32")],
"pools": f.params[2:],
"devices": f.attrs["devices"],
"executor": "aot",
"mod_name": "test_mod",
"interface_api": "c",
"unpacked_api": False,
"workspace_alignment": 16,
"constant_alignment": 1,
"pool_inputs": {},
"io_pool_allocations": {},
}
executor_metadata = CreateExecutorMetadata(mod, "test_mod", executor, 16, 1)
_check_executor_metadata(executor_metadata, expected_metadata)
if __name__ == "__main__":
tvm.testing.main() |
import numpy as np

import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.runtime.ndarray import array
from tvm.relay.backend.aot import CreateFunctionMetadata
from tvm.ir.memory_pools import AllocatedPoolInfo, ConstantPoolInfo, WorkspacePoolInfo, ConstantInfo
def _check_function_metadata(function_metadata, expected_infos):
for symbol, expected_info in expected_infos.items():
func_info = function_metadata[symbol]
key, value = func_info.workspace_sizes.items()[0]
assert str(key) == expected_info["target"]
assert value == expected_info["workspace_sizes"]
key, value = func_info.io_sizes.items()[0]
assert str(key) == expected_info["target"]
assert value == expected_info["io_sizes"]
key, value = func_info.constant_sizes.items()[0]
assert str(key) == expected_info["target"]
assert value == expected_info["constant_sizes"]
key, value = func_info.tir_primfuncs.items()[0]
assert str(key) == expected_info["target"]
tvm.ir.assert_structural_equal(value, expected_info["tir_primfuncs"])
def test_create_function_metadata_workspace_allocate_only():
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
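    # Expected sizes below: three 140-byte workspace allocates, each rounded up
    # to the 16-byte alignment passed to CreateFunctionMetadata (3 * 144 = 432),
    # and two 5x7 float32 I/O tensors (2 * 140 = 280).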
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 432,
"io_sizes": 280,
"constant_sizes": 0,
"tir_primfuncs": Module["__tvm_main__"],
}
}
function_metadata = CreateFunctionMetadata(Module, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_constant_allocate_only():
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
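    # Expected sizes below: no workspace, two 5x7 float32 I/O tensors
    # (280 bytes), and one 5x7 float32 allocate_const (140 bytes).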
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 0,
"io_sizes": 280,
"constant_sizes": 140,
"tir_primfuncs": Module["__tvm_main__"],
}
}
function_metadata = CreateFunctionMetadata(Module, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_constant_pool_only():
@tvm.script.ir_module
class Module:
@T.prim_func
        def __tvm_main__(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
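    # The 256-byte constant size below comes entirely from the constant pool
    # attached further down, not from any allocate_const in the function body.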
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 0,
"io_sizes": 280,
"constant_sizes": 256,
"tir_primfuncs": Module["__tvm_main__"],
}
}
target = Module["__tvm_main__"].attrs["target"]
mod = Module.with_attr(
"pool_args",
[
AllocatedPoolInfo(
ConstantPoolInfo(
"flash",
[target],
[ConstantInfo("a", 0, array(np.array([0])))],
),
256,
),
],
)
function_metadata = CreateFunctionMetadata(mod, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_workspace_pool_only():
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
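    # With no allocates in the function body, the 256-byte workspace figure
    # below comes entirely from the workspace pool attached further down.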
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 256,
"io_sizes": 280,
"constant_sizes": 0,
"tir_primfuncs": Module["__tvm_main__"],
}
}
target = Module["__tvm_main__"].attrs["target"]
mod = Module.with_attr(
"pool_args",
[
AllocatedPoolInfo(
WorkspacePoolInfo("sram", [target]),
256,
),
],
)
function_metadata = CreateFunctionMetadata(mod, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_all_single_func():
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
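    # Workspace: 3 * 144 (16-byte-aligned allocates) + 256 (pool) = 688 bytes;
    # constants: 140 (allocate_const) + 512 (pool) = 652 bytes.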
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 688,
"io_sizes": 280,
"constant_sizes": 652,
"tir_primfuncs": Module["__tvm_main__"],
}
}
target = Module["__tvm_main__"].attrs["target"]
mod = Module.with_attr(
"pool_args",
[
AllocatedPoolInfo(
ConstantPoolInfo(
"flash",
[target],
[ConstantInfo("a", 0, array(np.array([0])))],
),
512,
),
AllocatedPoolInfo(
WorkspacePoolInfo("sram", [target]),
256,
),
],
)
function_metadata = CreateFunctionMetadata(mod, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_workspace_multi_funcs():
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
@T.prim_func
def test_fused_add(a: T.handle, b: T.handle, output: T.handle) -> None:
T.func_attr({"global_symbol": "test_mod_test_fused_add", "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
b_buffer = T.match_buffer(b, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
sid_0 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("magic", a_buffer.data, b_buffer.data, sid_0, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
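    # test_fused_add expects 144 bytes of workspace (140 aligned up to 16),
    # three 5x7 float32 I/O tensors (3 * 140 = 420) and a 140-byte constant.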
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 0,
"io_sizes": 280,
"constant_sizes": 0,
"tir_primfuncs": Module["__tvm_main__"],
},
"test_fused_add": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 144,
"io_sizes": 420,
"constant_sizes": 140,
"tir_primfuncs": Module["test_fused_add"],
},
}
function_metadata = CreateFunctionMetadata(Module, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
if __name__ == "__main__":
tvm.testing.main() |
"""AOT with C Device API Tests""" |
import re
from collections |
import OrderedDict |
import numpy as np |
import pytest |
import tvm.testing
from tvm |
import relay
from tvm.ir.module |
import IRModule
from tvm.testing.aot |
import AOTTestModel, generate_ref_data, compile_models
from tvm.micro.testing.aot_test_utils |
import AOT_DEFAULT_RUNNER
@pytest.fixture(name="device_api_main_func")
def fixture_device_api_main_func():
"""Test function generator which generates C Device API calls"""
pytest.importorskip("ethosu.vela") |
import tensorflow as tf |
import tflite.Model
from tests.python.contrib.test_ethosu.infra |
import create_test_runner, generate_ref_data_tflite
from tvm.relay.op.contrib.ethosu |
import partition_for_ethosu
    tf.config.run_functions_eagerly(True)
class Model(tf.Module):
@tf.function
def tf_function(self, x):
return tf.nn.max_pool(x, [1, 2], [1, 2], "SAME")
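    # Random input samples the TFLite converter runs through the model to
    # calibrate int8 quantization ranges for full-integer conversion.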
def representative_dataset():
for _ in range(100):
data = np.random.rand(1, 3, 4, 3)
yield [data.astype(np.float32)]
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec([1, 3, 4, 3], dtype=tf.float32)
)
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_graph = converter.convert()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"x": [1, 3, 4, 3]},
dtype_dict={"x": "int8"},
)
mod = partition_for_ethosu(relay_module, params)
input_data, output_data = generate_ref_data_tflite(tflite_graph)
def compile_to_main_func(interface_api="c", use_unpacked_api=True):
test_runner = create_test_runner()
compiled_models = compile_models(
models=AOTTestModel(
module=mod,
inputs=input_data,
outputs=output_data,
),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
workspace_byte_alignment=16,
pass_config=test_runner.pass_config,
)
main_ir_module = compiled_models[0].executor_factory.lowered_ir_mods.items()[0][1]
main_func = main_ir_module["__tvm_main__"]
return main_func
return compile_to_main_func
@pytest.fixture(name="non_device_api_main_func")
def fixture_non_device_api_main_func():
"""Test function generator which does not generate C De |
vice API calls"""
x = relay.var("x", shape=(10, 10))
y = relay.var("y", shape=(1, 10))
func = relay.Function([x, y], relay.multiply(x, y))
x_data = np.random.rand(10, 10).astype("float32")
y_data = np.random.rand(1, 10).astype("float32")
inputs = OrderedDict([("x", x_data), ("y", y_data)])
output_list = generate_ref_data(func, inputs)
def compile_to_main_func(interface_api="c", use_unpacked_api=True):
test_runner = AOT_DEFAULT_RUNNER
compiled_models = compile_models(
models=AOTTestModel(
module=IRModule.from_expr(func),
inputs=inputs,
outputs=output_list,
),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
workspace_byte_alignment=16,
pass_config=test_runner.pass_config,
)
main_ir_module = list(compiled_models[0].executor_factory.lowered_ir_mods.values())[0]
main_func = main_ir_module["__tvm_main__"]
return main_func
return compile_to_main_func
def test_device_api_hooks_unpacked_api(device_api_main_func):
"""Check for Device API hooks with unpacked internal calls"""
main_func = device_api_main_func(interface_api="c", use_unpacked_api=True)
assert (
str(main_func.body[0])
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUActivate",'
+ " device_context_ethos_u))\n"
)
print("main func", repr(main_func.body))
assert (
str(main_func.body[1][0][0][0])
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUOpen",'
+ " device_context_ethos_u))\n"
)
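    # The buffer argument names are compiler-generated, so the call into the
    # Ethos-U main kernel is matched with a regex rather than a literal string.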
regex = re.compile(
r"tir\.tvm_check_return\("
r"0, -1, "
r'tir\.call_extern\("tvmgen_default_ethos_u_main_0", '
r"\w+, \w+, device_context_ethos_u\)\)"
)
assert regex.match(str(main_func.body[1][0][0][1]))
assert (
        str(main_func.body[1][0][0][2])
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUClose",'
+ " device_context_ethos_u))\n"
)
assert (
        str(main_func.body[2])
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUDeactivate",'
+ " device_context_ethos_u))\n"
)
@pytest.mark.skip(
"Skipping this test as this is incorrectly using Arm(R) Ethos(TM)-U NPU "
"with packed calling convention which is not supported by the NPU codegen's "
"TIR to Runtime Hook. We need to use a different target to test this feature"
)
def test_device_api_hooks_packed_api(device_api_main_func):
"""Check for Device API hooks with packed internal calls"""
main_func = device_api_main_func(interface_api="packed", use_unpacked_api=False)
assert (
str(main_func.body[0][0].value)
== "@tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUActivate",'
+ " device_context_ethos_u: handle,"
+ " dtype=int32))"
)
assert (
str(main_func.body[1].body.body[0][0][0].value)
== "@tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUOpen",'
+ " device_context_ethos_u: handle,"
+ " dtype=int32))"
)
assert (
str(main_func.body[1].body.body[0][0][1][0].value)
== "@tir.tvm_call_cpacked("
+ '"tvmgen_default_ethos_u_main_0",'
+ " input: handle, output: handle,"
+ " device_context_ethos_u: handle,"
+ " dtype=int32)"
)
assert (
str(main_func.body[1].body.body[0][0][2].value)
== "@tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUClose",'
+ " device_context_ethos_u: handle,"
+ " dtype=int32))"
)
assert (
str(main_func.body[2][0].value)
== "@tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUDeactivate",'
+ " device_context_ethos_u: handle,"
+ " dtype=int32))" |
)
def test_without_device_api_unpacked_api(non_device_api_main_func):
"""Test a graph without the Device API with the unpacked internal calls"""
main_func = non_device_api_main_func(interface_api="c", use_unpacked_api=True)
assert (
str(main_func.body)
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"tvmgen_default_fused_multiply",'
+ " x_buffer_var, y_buffer_var, output_buffer_var))\n"
)
def test_without_device_api_packed_api(non_device_api_main_func):
"""Test a graph without the Device API with the packed internal calls"""
main_func = non_device_api_main_func(interface_api="packed", use_unpacked_api=False)
assert str(main_func.body) == (
'tir.tvm_call_cpacked("tvmgen_default_fused_multiply", '
"tir.tvm_stack_make_array(x_buffer_var, tir.tvm_stack_make_shape(10, 10), tir.reinterpret((uint64)0), (uint32)2, float32(0), 0), "
"tir.tvm_stack_make_array(y_buffer_var, tir.tvm_stack_make_shape(1, 10), tir.reinterpret((uint64)0), (uint32)2, float32(0), 0), "
"tir.tvm_stack_make_array(output_buffer_var, tir.tvm_stack_make_shape(10, 10), tir.reinterpret((uint64)0), (uint32)2, float32(0), 0), "
"tir.reinterpret((uint64)0))\n"
)
if __name__ == "__main__":
tvm.testing.main() |
"""AOT with C++ Runtime Tests""" |
import re |
import textwrap |
import numpy as np |
import pytest |
import tvm
from tvm |
import IRModule
from tvm |
import relay
from tvm.relay |
import backend, testing
from tvm.testing.aot |
import generate_ref_data
def test_error_c_interface():
"""Checks that an error occurs when using the packed API in combination with C interface"""
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], two)
with pytest.raises(
tvm.TVMError,
match=re.escape(
'Need unpacked-api == false (got: 0) and interface-api == "packed" (got: c) when '
"targeting c++ runtime"
),
):
tvm.relay.build(
IRModule.from_expr(func),
target="llvm",
executor=backend.Executor("aot", {"interface-api": "c"}),
)
@pytest.mark.parametrize("enable_usmp", [True, False])
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
def test_conv2d(enable_usmp, target_kind):
"""Tests compilation of convolutions"""
relay_model = textwrap.dedent(
"""\
def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(3, 3, 5, 5), int8]) {
%1 = nn.conv2d(
%data,
%weight,
padding=[2, 2],
channels=3,
kernel_size=[5, 5],
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32");
%2 = cast(nn.max_pool2d(%1, pool_size=[3, 3]), dtype="int8");
%3 = nn.conv2d(
%2,
%weight,
padding=[2, 2],
channels=3,
kernel_size=[5, 5],
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32");
%4 = nn.max_pool2d(%3, pool_size=[3, 3]);
%4
}
"""
)
ir_mod = tvm.parser.fromtext(relay_model)
main_func = ir_mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
    weight_data = np.random.randint(1, 255, shape_dict["weight"]).astype(type_dict["weight"])
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params = {"weight": weight_data}
inputs = {"data": input_data}
ref_outputs = generate_ref_data(ir_mod, inputs, params)
with tvm.transform.PassContext(
opt_level=3,
config={
"tir.disable_vectorize": True,
"tir.usmp.enable": enable_usmp,
},
):
mod = tvm.relay.build(
ir_mod,
params=params,
target=target_kind,
executor=backend.Executor("aot", {"interface-api": "packed", "unpacked-api": False}),
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
mod.export_library(test_so_path, cc="gcc", options=["-std=c11", "-g3", "-O0"])
loaded_mod = tvm.runtime.load_module(test_so_path)
runner = tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0)))
runner.set_input(**inputs)
runner.run()
assert (runner.get_output(0).numpy() == list(ref_outputs.values())[0]).all()
@pytest.mark.parametrize("enable_usmp", [True, False])
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
def test_mobilenet(enable_usmp: bool, target_kind: str):
"""Full network test with Mobilenet"""
ir_mod, params = testing.mobilenet.get_workload(batch_size=1)
data_shape = [int(x) for x in ir_mod["main"].checked_type.arg_types[0].shape]
data = np.random.uniform(size=data_shape).astype("float32")
inputs = {"data": data}
ref_outputs = generate_ref_data(ir_mod, inputs, params)
with tvm.transform.PassContext(
opt_level=3, config={"tir.disable_vectorize": True, "tir.usmp.enable": enable_usmp}
):
mod = tvm.relay.build(
ir_mod,
params=params,
target=target_kind,
executor=backend.Executor("aot", {"interface-api": "packed"}),
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
    mod.export_library(test_so_path, cc="c++", options=["-std=gnu++17", "-g3", "-O0"])
loaded_mod = tvm.runtime.load_module(test_so_path)
runner = tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0)))
runner.set_input(**inputs)
runner.run()
assert (runner.get_output(0).asnumpy() == list(ref_outputs.values())[0]).all()
def test_module_list():
"""Checks the correct list of module names is generated"""
input_x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
expr = tvm.relay.add(input_x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
mod = tvm.relay.build(
tvm.IRModule.from_expr(tvm.relay.Function([input_x], expr)),
target="c",
executor=tvm.relay.backend.Executor("aot", {"interface-api": "packed"}),
mod_name="unusual_module_name_fred",
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
mod.export_library(test_so_path, cc="gcc", options=["-std=c11"])
loaded_mod = tvm.runtime.load_module(test_so_path)
list_module_names = loaded_mod.get_function("list_module_names")
names_expected = ["unusual_module_name_fred"]
assert list(sorted(names_expected)) == list(sorted(list_module_names()))
def test_create_executor():
x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
actual = relay.create_executor(
"aot", mod=tvm.IRModule.from_expr(tvm.relay.Function([x], expr)), target="c"
).evaluate()(np.array([2], dtype="float32"))
np.testing.assert_allclose(actual.numpy(), np.array([3], dtype="float32"))
def test_pass_wrong_device_arg():
"""Ensure an error is generated if the incorrect number of devices are passed"""
x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
mod = tvm.relay.build(
tvm.IRModule.from_expr(tvm.relay.Function([x], expr)),
target="c",
executor=backend.Executor("aot", {"interface-api": "packed"}),
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
mod.export_library(test_so_path, cc="gcc", options=["-std=c11", "-g3", "-O0"])
loaded_mod = tvm.runtime.load_module(test_so_path)
    with pytest.raises(tvm.TVMError) as error:
        tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0), tvm.cpu(0)))
    assert (
        "Check failed: devices_.size() == 1 (2 vs. 1) : Expect exactly 1 device passed."
        in str(error.value)
    )
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
@pytest.mark.parametrize("input_name", ["input:0", "input@0", "input_0"])
def test_aot_input_name_with_special_character(target_kind: str, input_name: str):
"""Test name transforms in AOT for input names with special characters."""
dtype = "float32"
input_1 = relay.var(input_name, shape=(10, 5), dtype=dtype)
weight = relay.var("weight", shape=(1, 5), dtype=dtype)
output = relay.add(input_1, weight)
func = relay.Function([input_1, weight], output)
input_data = np.random.rand(10, 5).astype(dtype)
weight_data = np.random.rand(1, 5).astype(dtype)
expected_output = input_data + weight_data
params = {"weight": weight_data}
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
mod = tvm.relay.build(
tvm.IRModule.from_expr(func),
target=target_kind,
params=params,
executor=tvm.relay.backend.Executor("aot", {"interface-api": "packed"}),
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
mod.export_library(test_so_path, cc="c++", options=["-std=gnu++17", "-g3", "-O0"])
for name in ["input_0", input_name]:
        loaded_mod = tvm.runtime.load_module(test_so_path)
runner = tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0)))
inputs = {name: input_data}
runner.set_input(**inputs)
input_ind = runner.get_input_index(name)
assert (runner.get_input(input_ind).asnumpy() == input_data).all()
runner.run()
assert (runner.get_output(0).asnumpy() == expected_output).all()
if __name__ == "__main__":
tvm.testing.main() |
"""AOT with C Runtime Tests"""
from collections |
import OrderedDict |
import re |
import os |
import tarfile |
import pathlib |
import numpy as np |
import pytest |
import tvm
from tvm |
import relay, TVMError
from tvm.contrib |