["tile_oh", "ot", 1],
["tile_ow", "sp", [4, 2]],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [16, 2]],
["tile_oc", "sp", [8, 4]],
["tile_ow", "sp", [2, 4]],
["unroll_kw", "ot", False],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
records = []
new_args = new_args + new_args
tasks = tasks + tasks
for args, cost, config, task in zip(new_args, costs, config_list, tasks):
task.args = args
ms_input = MeasureInput(target=target, task=task, config=config)
ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
records.append((ms_input, ms_output))
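# A single measured layout_transform record is provided so the graph tuners can
# look up layout-transform costs from history instead of benchmarking on device.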
ltf_records = []
ltf_arg = [te.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
ltf_task = autotvm.task.create("layout_transform", ltf_arg, target)
ms_input = MeasureInput(target=target, task=ltf_task, config=None)
ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
ltf_records.append((ms_input, ms_output))
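# Run both graph tuners (dynamic-programming based DPTuner and PBQP based
# PBQPTuner) and check that they select the expected per-node configs.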
executor = DPTuner(net, {"data": dshape}, records, target_ops, target)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
executor.run()
out = [record[0].config for record in executor.get_optimal_records()]
expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
str(expected_out),
str(out),
)
executor = PBQPTuner(net, {"data": dshape}, records, target_ops, target)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
executor.run()
out = [record[0].config for record in executor.get_optimal_records()]
expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
str(expected_out),
str(out),
)
def test_tuple():
target = "llvm"
dtype = "float32"
dshape = (1, 5, 32, 32)
layout = "NCHW"
conv2d = relay.op.get("nn.conv2d")
target_ops = [conv2d]
data = relay.var("data", shape=dshape, dtype=dtype)
w0 = relay.var("w0_weight")
conv0 = relay.nn.conv2d(data, w0, channels=2, kernel_size=(3, 3), padding=(1, 1))
w1 = relay.var("w1_weight")
conv1 = relay.nn.conv2d(data, w1, channels=3, kernel_size=(3, 3), padding=(1, 1))
out = relay.concatenate([conv0, conv1], axis=1)
net = relay.Function(relay.analysis.free_vars(out), out)
net, params = relay.testing.create_workload(net)
tasks = autotvm.task.extract_from_program(
net["main"], target=target, params=params, ops=(conv2d,)
)
new_args = [
_create_args(
(1, 5, 32, 32), (2, 5, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
),
_create_args(
(1, 5, 32, 32), (3, 5, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
),
]
costs = [0.01, 0.012, 0.03, 0.04]
config_list = []
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [1, 5]],
["tile_oc", "sp", [1, 2]],
["tile_ow", "sp", [4, 8]],
["unroll_kw", "ot", True],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [1, 5]],
["tile_oc", "sp", [1, 3]],
["tile_ow", "sp", [2, 16]],
["unroll_kw", "ot", False],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [1, 5]],
["tile_oc", "sp", [2, 1]], |
["tile_ow", "sp", [4, 8]],
["unroll_kw", "ot", True],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [1, 5]],
["tile_oc", "sp", [3, 1]],
["tile_ow", "sp", [2, 16]],
["unroll_kw", "ot", False],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
records = []
new_args = new_args + new_args
tasks = tasks + tasks
for args, cost, config, task in zip(new_args, costs, config_list, tasks):
task.args = args
ms_input = MeasureInput(target=target, task=task, config=config)
ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
records.append((ms_input, ms_output))
ltf_records = []
ltf_arg = [te.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
ltf_task = autotvm.task.create("layout_transform", ltf_arg, target)
ms_input = MeasureInput(target=target, task=ltf_task, config=None)
ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
ltf_records.append((ms_input, ms_output))
executor = DPTuner(net, {"data": dshape}, records, target_ops, target)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
executor.run()
out = [record[0].config for record in executor.get_optimal_records()]
expected_out = [records[2][0].config, records[1][0].config]
assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
str(expected_out),
str(out),
)
executor = PBQPTuner(net, {"data": dshape}, records, target_ops, target)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
executor.run()
out = [record[0].config for record in executor.get_optimal_records()]
expected_out = [records[2][0].config, records[1][0].config]
assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
str(expected_out),
str(out),
)
def test_triangle_block():
target = "llvm"
dtype = "float32"
dshape = (1, 3, 8, 8)
layout = "NCHW"
conv2d = relay.op.get("nn.conv2d")
target_ops = [conv2d]
data = relay.var("data", shape=dshape, dtype=dtype)
w0 = relay.var("w0_weight")
conv0 = relay.nn.conv2d(data, w0, channels=16, kernel_size=(3, 3), padding=(1, 1))
w1 = relay.var("w1_weight")
conv1 = relay.nn.conv2d(conv0, w1, channels=32, kernel_size=(1, 1))
w2 = relay.var("w2_weight")
conv2 = relay.nn.conv2d(data, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
out = relay.concatenate([conv0, conv1, conv2], axis=1)
net = relay.Function(relay.analysis.free_vars(out), out)
net, params = relay.testing.create_workload(net)
tasks = autotvm.task.extract_from_program(
net["main"], target=target, params=params, ops=(conv2d,)
)
new_args = [
_create_args(
(1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
),
_create_args(
(1, 16, 8, 8),
(32, 16, 1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 1),
layout,
layout,
dtype,
dtype,
),
_create_args(
(1, 3, 8, 8), (32, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
),
]
costs = [0.04, 0.012, 0.03, 0.02, 0.02, 0.045]
config_list = []
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [3, 1]],
["tile_oc", "sp", [4, 4]],
["tile_ow", "sp", [4, 2]],
["unroll_kw", "ot", True],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [2, 8]],
["tile_oc", |
"sp", [1, 32]],
["tile_oh", "ot", 1],
["tile_ow", "sp", [4, 2]],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [8, 4]],
["tile_oc", "sp", [4, 8]],
["tile_ow", "sp", [2, 4]],
["unroll_kw", "ot", False],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [1, 3]],
["tile_oc", "sp", [2, 8]],
["tile_ow", "sp", [4, 2]],
["unroll_kw", "ot", True],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [4, 4]],
["tile_oc", "sp", [2, 16]],
["tile_oh", "ot", 1],
["tile_ow", "sp", [4, 2]],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
cfg_dict = {
"index": -1,
"code_hash": None,
"entity": [
["tile_ic", "sp", [16, 2]],
["tile_oc", "sp", [8, 4]],
["tile_ow", "sp", [2, 4]],
["unroll_kw", "ot", False],
],
}
config_list.append(ConfigEntity.from_json_dict(cfg_dict))
records = []
new_args = new_args + new_args
tasks = tasks + tasks
for args, cost, config, task in zip(new_args, costs, config_list, tasks):
task.args = args
ms_input = MeasureInput(target=target, task=task, config=config)
ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
records.append((ms_input, ms_output))
ltf_records = []
ltf_arg = [te.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
ltf_task = autotvm.task.create("layout_transform", ltf_arg, target)
ms_input = MeasureInput(target=target, task=ltf_task, config=None)
ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
ltf_records.append((ms_input, ms_output))
executor = DPTuner(net, {"data": dshape}, records, target_ops, target)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
executor.run()
out = [record[0].config for record in executor.get_optimal_records()]
expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
str(expected_out),
str(out),
)
executor = PBQPTuner(net, {"data": dshape}, records, target_ops, target)
executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
executor.run()
out = [record[0].config for record in executor.get_optimal_records()]
expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
str(expected_out),
str(out),
)
if __name__ == "__main__":
test_graph_tuner_layout_transform()
test_DPTuner_run()
test_PBQPTuner_run()
test_many_sub_graphs()
test_tuple()
test_triangle_block()
import pytest
import tvm
from tvm import te
from tvm import autotvm, relay
from tvm.relay.testing import synthetic
from tvm.autotvm.graph_tuner.utils import (
has_multiple_inputs,
get_direct_ancestor,
get_in_nodes,
get_out_nodes,
expr2graph,
bind_inputs,
)
from tvm.autotvm.graph_tuner._base import OPT_OUT_OP
from tvm.autotvm.graph_tuner.utils.traverse_graph import _replace_device_with_tracing
from tvm.relay.expr import Call, TupleGetItem, Tuple, Var
def verify_has_multiple_inputs(node_list, node_idx, input_names, expected_result):
out = has_multiple_inputs(node_list, node_idx, input_names, OPT_OUT_OP)
assert out == expected_result, "Output mismatch: expecting checking %s to be %s but got %s." % (
node_list[node_idx]["op"],
str(expected_result),
str(out),
)
def test_has_multiple_inputs():
data = relay.var("data")
out1 = data * relay.expr.const(3.0)
w0 = relay.var("w0")
out2 = relay.nn.conv2d(data, w0)
out = relay.add(out1, out2)
net = relay.Function(relay.analysis.free_vars(out), out)
net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1)})
target_ops = [relay.op.get("nn.conv2d")]
node_list = []
node_dict = {}
expr2graph(net, target_ops, node_dict, node_list, tvm.target.Target("llvm"))
input_names = ["data"]
verify_has_multiple_inputs(node_list, 2, input_names, False)
verify_has_multiple_inputs(node_list, 4, input_names, False)
verify_has_multiple_inputs(node_list, 5, input_names, True)
def test_expr2graph():
mod, _ = synthetic.get_workload()
node_dict = {}
node_list = []
target_ops = [relay.op.get("nn.conv2d")]
op_name_list = []
def _count_node(node):
if isinstance(node, Call):
op_name_list.append(node.op)
elif isinstance(node, (Var, TupleGetItem, Tuple)):
op_name_list.append(None)
relay.analysis.post_order_visit(mod["main"], _count_node)
expr2graph(mod["main"], target_ops, node_dict, node_list, tvm.target.Target("llvm"))
assert len(node_list) == len(op_name_list)
for i, item in enumerate(zip(op_name_list, node_list)):
op_name, node = item
assert op_name == node["op"], "%dth Node operator mismatch: expecting %s but got %s" % (
i,
str(op_name),
str(node["op"]),
)
def test_get_direct_ancestor():
data = relay.var("data")
w0 = relay.var("w0")
out1 = relay.nn.conv2d(data, w0)
out2 = relay.add(out1, data * relay.expr.const(5.0))
out3 = out2 + relay.expr.const(2.5)
w1 = relay.var("w1")
out = relay.nn.conv2d(out3, w1)
net = relay.Function(relay.analysis.free_vars(out), out)
net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1), "w1": (16, 16, 1, 1)})
target_ops = [relay.op.get("nn.conv2d")]
node_list = []
node_dict = {}
expr2graph(net, target_ops, node_dict, node_list, tvm.target.Target("llvm"))
visited_dict = {}
input_names = ["data"]
out = get_direct_ancestor(node_list, visited_dict, target_ops, 5, input_names)
assert out == [0], "Output mismatch: expecting [0] but got %s." % str(out)
out = relay.add(relay.log(data), relay.sqrt(data))
net = relay.Function(relay.analysis.free_vars(out), out)
net = bind_inputs(net, {"data": (1, 16, 224, 224)})
node_list = []
node_dict = {}
expr2graph(net, target_ops, node_dict, node_list, tvm.target.Target("llvm"))
out = get_direct_ancestor(node_list, visited_dict, target_ops, 3, input_names)
assert out == [0], "Output mismatch: expecting [0] but got %s." % str(out)
def test_get_in_nodes():
data = relay.var("data")
w0 = relay.var("w0")
out1 = relay.nn.conv2d(data, w0)
out2 = relay.add(out1, data)
out3 = out2 + relay.expr.const(2.5)
w1 = relay.var("w1")
out = relay.nn.conv2d(out3, w1)
net = relay.Function(relay.analysis.free_vars(out), out)
net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1), "w1": (16, 16, 1, 1)})
target_ops = [relay.op.get("nn.conv2d")]
input_names = ["data"]
node_list = []
node_dict = {}
expr2graph(net, target_ops, node_dict, node_list, tvm.target.Target("llvm"))
out = get_in_nodes(node_list, target_ops, input_names)
expected_out = {3: [0], 4: [3, 0], 7: [4]}
diff_set = set(out) ^ set(expected_out)
if len(diff_set) != 0:
raise RuntimeError(
"Output mismatch: expecting %s but got %s." % (str(e |
xpected_out), str(out))
)
def test_get_out_nodes():
in_nodes_dict = {8: [4], 4: [3, 0], 3: [0]}
expected_out = {0: [3, 4], 3: [4], 4: [8], 8: []}
out = get_out_nodes(in_nodes_dict)
diff_set = set(out) ^ set(expected_out)
if len(diff_set) != 0:
raise RuntimeError(
"Output mismatch: expecting %s but got %s." % (str(expected_out), str(out))
)
def test_target_device_replacement():
assert _replace_device_with_tracing("cuda") == "cuda -device=tracing"
assert (
_replace_device_with_tracing("cuda -device=some_device -libs=cudnn")
== "cuda -device=tracing -libs=cudnn"
)
assert (
_replace_device_with_tracing("llvm -device=arm_cpu -arg=xxx")
== "llvm -device=tracing -arg=xxx"
)
assert _replace_device_with_tracing("llvm -device=arm_cpu") == "llvm -device=tracing"
assert _replace_device_with_tracing("llvm -device=abc, def") == "llvm -device=tracing"
if __name__ == "__main__":
test_has_multiple_inputs()
test_expr2graph()
test_get_direct_ancestor()
test_get_in_nodes()
test_get_out_nodes()
"""Test index based tuners"""
import multiprocessing
from tvm.testing.autotvm import DummyRunner, get_sample_task
from tvm import autotvm
def test_grid_search_tuner():
"""Test GridSearchTuner"""
task, _ = get_sample_task()
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
tuner = autotvm.tuner.GridSearchTuner(task)
assert tuner.begin_idx == 0
assert tuner.end_idx == 64
assert tuner.index == 0
assert tuner.range_length == 64
assert tuner.visited_max == 64
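# Restricting the tuner to index range [8, 16) leaves only 8 candidate configs.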
tuner = autotvm.tuner.GridSearchTuner(task, range_idx=(8, 15))
assert tuner.begin_idx == 8
assert tuner.end_idx == 16
assert tuner.index == 8
assert tuner.range_length == 8
assert tuner.visited_max == 8
tuner.tune(n_trial=8, measure_option=measure_option)
assert len(tuner.visited) == 8
assert not tuner.has_next()
task, _ = get_sample_task()
task.config_space.multi_filter(
filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024
)
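# The multi_filter leaves 34 of the 64 configs valid, so the tuner skips ahead
# to the first valid index and visited_max shrinks accordingly.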
tuner = autotvm.tuner.GridSearchTuner(task)
assert tuner.begin_idx == 0
assert tuner.end_idx == 64
assert tuner.index == 5
assert tuner.range_length == 64
assert tuner.visited_max == 34
tuner = autotvm.tuner.GridSearchTuner(task, range_idx=(8, 15))
assert tuner.begin_idx == 8
assert tuner.end_idx == 16
assert tuner.index == 12
assert tuner.range_length == 8
assert tuner.visited_max == 4
tuner.tune(n_trial=8, measure_option=measure_option)
assert len(tuner.visited) == 4
assert not tuner.has_next()
def grid_search_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_grid_search_tuner()
def test_grid_search_tuner_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=test_grid_search_tuner)
p.start()
p.join()
def test_random_tuner():
"""Test RandomTuner"""
task, _ = get_sample_task()
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
tuner = autotvm.tuner.RandomTuner(task, range_idx=(8, 15))
assert tuner.begin_idx == 8
assert tuner.end_idx == 16
assert tuner.range_length == 8
assert tuner.visited_max == 8
tuner.tune(n_trial=8, measure_option=measure_option)
assert len(tuner.visited) == 8
assert not tuner.has_next()
for idx in tuner.visited:
assert 8 <= idx <= 15
task, _ = get_sample_task()
task.config_space.multi_filter(
filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024
)
tuner = autotvm.tuner.RandomTuner(task, range_idx=(8, 15))
assert tuner.begin_idx == 8
assert tuner.end_idx == 16
assert tuner.range_length == 8
assert tuner.visited_max == 4
tuner.tune(n_trial=8, measure_option=measure_option)
assert len(tuner.visited) == 4
assert not tuner.has_next()
for idx in tuner.visited:
assert 8 <= idx <= 15
if __name__ == "__main__":
test_grid_search_tuner()
test_grid_search_tuner_spawn()
test_random_tuner()
"""Test builder and runner"""
import logging
import multiprocessing
import concurrent.futures
import numpy as np
import tvm
from tvm import te
from tvm.autotvm.measure import executor
from tvm.testing.autotvm import DummyRunner, bad_matmul, get_sample_task
from tvm import autotvm
from tvm.autotvm.measure.measure import MeasureErrorNo, MeasureResult
from tvm.autotvm import measure
from inspect import Signature
def test_task_tuner_without_measurement():
"""test task and tuner without measurement"""
task, _ = get_sample_task()
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
logging.info("%s", task.config_space)
for tuner_class in [
autotvm.tuner.RandomTuner,
autotvm.tuner.GridSearchTuner,
autotvm.tuner.GATuner,
autotvm.tuner.XGBTuner,
]:
tuner = tuner_class(task)
tuner.tune(n_trial=10, measure_option=measure_option)
assert tuner.best_flops > 1
def task_tuner_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_task_tuner_without_measurement()
def test_task_tuner_without_measurement_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=task_tuner_spawn)
p.start()
p.join()
def test_task_runner_with_ref_input():
"""test runner ref_input without measurement"""
refinp = [np.random.rand(128, 128) for i in range(3)]
runner = measure.LocalRunner()
runner.ref_input = refinp
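# DummyExecutor intercepts submit() so the test can verify that ref_input is
# forwarded to the measurement function without actually running it.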
class DummyExecutor(measure.executor.Executor):
def __init__(self):
self.ran_dummy_executor = False
def submit(self, func, *args, **kwargs):
self.ran_dummy_executor = True
sig = Signature.from_callable(func)
assert sig.bind(*args, **kwargs).arguments["ref_input"] == refinp
dummy_future = concurrent.futures.Future()
dummy_future.set_result(None)
return dummy_future
runner.executor = DummyExecutor()
runner.run([None], [None])
assert runner.executor.ran_dummy_executor
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
test_task_tuner_without_measurement()
test_task_tuner_without_measurement_spawn()
test_task_runner_with_ref_input()
"""test the correctness of dump and load of data log"""
from io import StringIO
from os import PathLike
import time
from tvm.contrib import utils
from tvm import autotvm
from tvm.autotvm.measure import MeasureInput, MeasureResult, MeasureErrorNo
from tvm.autotvm.record import encode, decode, ApplyHistoryBest, measure_str_key
from tvm.testing.autotvm import get_sample_task
def test_load_dump():
task, target = get_sample_task()
inp = MeasureInput(target, task, task.config_space.get(0))
result = MeasureResult(
(2.0, 2.23, 0.23, 0.123, 0.234, 0.123), MeasureErrorNo.NO_ERROR, 2.3, time.time()
)
for protocol in ["json", "pickle"]:
row = encode(inp, result, protocol=protocol)
inp_2, result_2 = decode(row, protocol=protocol)
assert measure_str_key(inp) == measure_str_key(inp_2), "%s vs %s" % (
measure_str_key(inp),
measure_str_key(inp_2),
)
assert result.costs == result_2.costs
assert result.error_no == result_2.error_no
assert result.timestamp == result_2.timestamp
def test_file_io():
temp = utils.tempdir()
file_path = temp.relpath("temp.log")
tsk, target = get_sample_task()
inputs = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(0, 10)]
results = [MeasureResult((i,), 0, 0, 0) for i in range(0, 10)]
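# Also log an input whose config entity map has been emptied, to exercise the
# handling of malformed records in the log file.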
invalid_inp = MeasureInput(target, tsk, tsk.config_space.get(10))
invalid_res = MeasureResult((10,), 0, 0, 0)
invalid_inp.config._entity_map = {}
with open(file_path, "w") as fo:
cb = autotvm.callback.log_to_file(fo)
cb(None, inputs, results)
cb(None, [invalid_inp], [invalid_res])
ref = zip(inputs, results)
for x, y in zip(ref, autotvm.record.load_from_file(file_path)):
assert x[1] == y[1]
hist_best = ApplyHistoryBest([file_path, file_path])
x = hist_best.query(target, tsk.workload)
assert str(x) == str(inputs[0][2])
def test_apply_history_best(tmpdir):
tsk, target = get_sample_task()
best = str(tsk.config_space.get(2))
inputs_batch_1 = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(3)]
results_batch_1 = [MeasureResult((i,), 0, 0, 0) for i in range(1, 3)]
results_batch_1.append(MeasureResult((0.5,), 0, 2.3, 0))
filepath_batch_1 = tmpdir / "batch_1.log"
with open(filepath_batch_1, "w") as file:
autotvm.callback.log_to_file(file)(None, inputs_batch_1, results_batch_1)
assert isinstance(filepath_batch_1, PathLike)
hist_best = ApplyHistoryBest(filepath_batch_1)
assert str(hist_best.query(target, tsk.workload)) == best
hist_best = ApplyHistoryBest(str(filepath_batch_1))
assert str(hist_best.query(target, tsk.workload)) == best
stringio_batch_1 = StringIO()
assert isinstance(filepath_batch_1, PathLike)
callback = autotvm.callback.log_to_file(stringio_batch_1)
callback(None, inputs_batch_1, results_batch_1)
stringio_batch_1.seek(0)
hist_best = ApplyHistoryBest(stringio_batch_1)
assert str(hist_best.query(target, tsk.workload)) == best
hist_best = ApplyHistoryBest(list(zip(inputs_batch_1, results_batch_1)))
assert str(hist_best.query(target, tsk.workload)) == best
hist_best = ApplyHistoryBest(zip(inputs_batch_1, results_batch_1))
assert str(hist_best.query(target, tsk.workload)) == best
def test_apply_history_best_multiple_batches(tmpdir):
tsk, target = get_sample_task()
best = str(tsk.config_space.get(2))
inputs_batch_1 = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(2)]
results_batch_1 = [MeasureResult((i,), 0, 0, 0) for i in range(1, 3)]
filepath_batch_1 = tmpdir / "batch_1.log"
with open(filepath_batch_1, "w") as file:
autotvm.callback.log_to_file(file)(None, inputs_batch_1, results_batch_1)
inputs_batch_2 = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(2, 4)]
results_batch_2 = [MeasureResult((0.5,), 0, 0, 0), MeasureResult((3,), 0, 0, 0)]
filepath_batch_2 = tmpdir / "batch_2.log"
with open(filepath_batch_2, "w") as file:
autotvm.callback.log_to_file(file)(None, inputs_batch_2, results_batch_2)
hist_best = ApplyHistoryBest([filepath_batch_1, filepath_batch_2])
assert str(hist_best.query(target, tsk.workload)) == best
hist_best = ApplyHistoryBest(zip([filepath_batch_1, filepath_batch_2]))
assert str(hist_best.query(target, tsk.workload)) == best
hist_best = ApplyHistoryBest(
zip(
[
zip(inputs_batch_1, results_batch_1),
zip(inputs_batch_2, results_batch_2),
]
)
)
assert str(hist_best.query(target, tsk.workload)) == best
if __name__ == "__main__":
test_load_dump()
test_apply_history_best()
test_file_io()
"""Test space definition primitives"""
from tvm import te
from tvm.autotvm.task.space import ConfigSpace, FallbackConfigEntity
def gemm_func(cfg, N, filter_y=None, filter_x=None):
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="B")
k = te.reduce_axis((0, N), name="k")
C = te.compute((N, N), lambda i, j: te.sum(A[i, k] * B[k, j], axis=[k]), name="C")
s = te.create_schedule([C.op])
y, x = s[C].op.axis
cfg.define_split("tile_y", cfg.axis(y), num_outputs=2, filter=filter_y)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=2, filter=filter_x)
return s, [A, B, C]
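# gemm_func defines two split knobs (tile_y, tile_x) over an axis of length 128,
# giving an 8 x 8 = 64 point configuration space unless filters prune it.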
def test_split():
cfg = ConfigSpace()
gemm_func(cfg, 128)
assert cfg.range_length == 64
assert len(cfg.space_map["tile_y"]) == 8
cfg = ConfigSpace()
cfg.define_split("tile_x", cfg.axis(256), policy="factors", num_outputs=3)
assert len(cfg.space_map["tile_x"]) == 45
cfg.define_split("tile_y", cfg.axis(256), policy="power2", num_outputs=3)
assert len(cfg.space_map["tile_y"]) == 45
cfg.define_split("tile_z", cfg.axis(256), policy="verbose", num_outputs=3)
assert len(cfg.space_map["tile_z"]) == 45
cfg.define_split("tile_a", cfg.axis(224), policy="factors", num_outputs=3)
assert len(cfg.space_map["tile_a"]) == 63
cfg.define_split("tile_b", cfg.axis(224), policy="power2", num_outputs=3)
assert len(cfg.space_map["tile_b"]) == 36
cfg.define_split("tile_c", cfg.axis(224), policy="verbose", num_outputs=3)
assert len(cfg.space_map["tile_c"]) == 84
def count4(n):
cnt = 0
for a in range(0, n + 1):
for b in range(0, n - a + 1):
cnt += n - a - b + 1
return cnt
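# count4(n) counts compositions of n into four non-negative parts, i.e. the
# number of ways to split 2**n into four ordered power-of-two factors.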
n = 25
cfg = ConfigSpace()
cfg.define_split("x", cfg.axis(2**n), policy="factors", num_outputs=4)
assert len(cfg.space_map["x"]) == count4(n)
cfg = FallbackConfigEntity()
cfg.define_split("tile_n", cfg.axis(128), num_outputs=3)
cfg.fallback_split("tile_n", [-1, 8, 4])
cfg.define_split("tile_n", cfg.axis(128), num_outputs=3)
assert cfg["ti |
le_n"].size == [4, 8, 4]
cfg = FallbackConfigEntity()
cfg.define_split("tile_n", cfg.axis(49), num_outputs=3)
cfg.fallback_split("tile_n", [-1, 8, 4])
assert cfg["tile_n"].size == [7, 7, 1]
cfg = FallbackConfigEntity()
cfg.define_split("tile_n", cfg.axis(49), num_outputs=3)
try:
cfg.fallback_split("tile_n", [-1, 1, 0])
assert False
except RuntimeError:
pass
def _raises_exception(f):
try:
f()
except Exception:
return True
return False
def test_multi_filter():
cfg = ConfigSpace()
gemm_func(cfg, 128)
cfg_mf = ConfigSpace()
gemm_func(cfg_mf, 128)
cfg_mf.multi_filter(
filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024
)
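# multi_filter shrinks the set of valid configs (len) while leaving the index
# range (range_length) and knob dimensions unchanged.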
assert len(cfg) == 64
assert len(cfg_mf) == 34
assert cfg.range_length == 64
assert cfg_mf.range_length == 64
assert cfg.dims == [8, 8]
assert cfg_mf.dims == [8, 8]
assert cfg.is_index_valid(0) is True
assert cfg.is_index_valid(15) is True
assert cfg_mf.is_index_valid(0) is False
assert cfg_mf.is_index_valid(15) is True
assert _raises_exception(lambda: cfg.get(0)) is False
assert _raises_exception(lambda: cfg.get(15)) is False
assert _raises_exception(lambda: cfg_mf.get(0)) is True
assert _raises_exception(lambda: cfg_mf.get(15)) is False
assert cfg.subrange_length(0, 64) == 64
assert cfg.subrange_length(0, 32) == 32
assert cfg.subrange_length(16, 32) == 16
assert cfg.subrange_length(16, 16) == 0
assert _raises_exception(lambda: cfg.subrange_length(0, 128))
assert _raises_exception(lambda: cfg.subrange_length(-64, 64))
assert _raises_exception(lambda: cfg.subrange_length(64, 0))
assert cfg_mf.subrange_length(0, 64) == 34
assert cfg_mf.subrange_length(0, 32) == 17
assert cfg_mf.subrange_length(16, 32) == 10
assert cfg_mf.subrange_length(16, 16) == 0
assert _raises_exception(lambda: cfg_mf.subrange_length(0, 128))
assert _raises_exception(lambda: cfg_mf.subrange_length(-64, 64))
assert _raises_exception(lambda: cfg_mf.subrange_length(64, 0))
assert cfg.point2knob(0) == [0, 0]
assert cfg.point2knob(4) == [4, 0]
assert cfg.point2knob(8) == [0, 1]
assert cfg.point2knob(12) == [4, 1]
assert cfg_mf.point2knob(0) == [0, 0]
assert cfg_mf.point2knob(4) == [4, 0]
assert cfg_mf.point2knob(8) == [0, 1]
assert cfg_mf.point2knob(12) == [4, 1]
assert cfg.knob2point([0, 0]) == 0
assert cfg.knob2point([4, 0]) == 4
assert cfg.knob2point([0, 1]) == 8
assert cfg.knob2point([4, 1]) == 12
assert cfg_mf.knob2point([0, 0]) == 0
assert cfg_mf.knob2point([4, 0]) == 4
assert cfg_mf.knob2point([0, 1]) == 8
assert cfg_mf.knob2point([4, 1]) == 12
cfg_valid_indexes = list(filter(lambda idx: cfg.is_index_valid(idx), range(cfg.range_length)))
assert cfg.get_rand_index() in cfg_valid_indexes
assert cfg.get_rand_index(start=15, end=16) == 15
assert 10 <= cfg.get_rand_index(start=10, end=20) < 20
assert cfg.get_rand_index(to_exclude=cfg_valid_indexes[:-1]) == cfg_valid_indexes[-1:][0]
cfg_mf_valid_indexes = list(
filter(lambda idx: cfg_mf.is_index_valid(idx), range(cfg_mf.range_length))
)
assert cfg_mf.get_rand_index() in cfg_mf_valid_indexes
assert cfg_mf.get_rand_index(start=15, end=16) == 15
assert 10 <= cfg_mf.get_rand_index(start=10, end=20) < 20
assert (
cfg_mf.get_rand_index(to_exclude=cfg_mf_valid_indexes[:-1]) == cfg_mf_valid_indexes[-1:][0]
)
assert cfg.get_next_index(0) == 1
assert cfg.get_next_index(0, 1) == 1
assert cfg.get_next_index(0, 2) == 2
assert cfg.get_next_index(0, -1) is None
assert cfg.get_next_index(0, -2) is None
assert cfg.get_next_index(63) is None
assert cfg.get_next_index(63, 1) is None
assert cfg.get_next_index(63, 2) is None
assert cfg.get_next_index(63, -1) == 62
assert cfg.get_next_index(63, -2) == 61
assert cfg.get_next_index(60, 1, end=63) == 61
assert cfg.get_next_index(63, -1, start=60) == 62
assert cfg_mf.get_next_index(0) == 5
assert cfg_mf.get_next_index(0, 1) == 5
assert cfg_mf.get_next_index(0, 2) == 6
assert cfg_mf.get_next_index(0, -1) is None
assert cfg_mf.get_next_index(0, -2) is None
assert cfg_mf.get_next_index(63) is None
assert cfg_mf.get_next_index(63, 1) is None
assert cfg_mf.get_next_index(63, 2) is None
assert cfg_mf.get_next_index(63, -1) == 58
assert cfg_mf.get_next_index(63, -2) == 57
assert cfg_mf.get_next_index(60, 1, end=63) is None
assert cfg_mf.get_next_index(63, -1, start=60) is None
cfg_ints = cfg.sample_ints(5)
assert len(cfg_ints) == 5
assert set(cfg_ints).issubset(cfg_valid_indexes)
cfg_mf_ints = cfg_mf.sample_ints(5)
assert len(cfg_mf_ints) == 5
assert set(cfg_mf_ints).issubset(cfg_mf_valid_indexes)
cfg_walk = cfg.random_walk(15)
assert cfg_walk != 15
assert cfg_walk in cfg_valid_indexes
cfg_mf_walk = cfg_mf.random_walk(15)
assert cfg_mf_walk != 15
assert cfg_mf_walk in cfg_mf_valid_indexes
def test_filter_and_multi_filter():
cfg = ConfigSpace()
gemm_func(cfg, 128, filter_y=lambda y: y.size[-1] < 64)
assert len(cfg) == 48
assert cfg.range_length == 48
cfg.multi_filter(
filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024
)
assert len(cfg) == 27
assert cfg.range_length == 48
cfg = ConfigSpace()
s, (A, B, C) = gemm_func(cfg, 128, filter_y=None)
cfg.multi_filter(
filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024
)
assert len(cfg) == 34
assert cfg.range_length == 64
y, x = s[C].op.axis
cfg.define_split("tile_y", cfg.axis(y), num_outputs=2, filter=lambda y: y.size[-1] < 64)
assert len(cfg) == 27
assert cfg.range_length == 48
if __name__ == "__main__":
test_split()
test_multi_filter()
test_filter_and_multi_filter()
import time
import multiprocessing
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm import MeasureInput, MeasureResult
from tvm.autotvm.tuner.xgboost_cost_model import XGBoostCostModel
from tvm.testing.autotvm import get_sample_task, get_sample_records
def test_fit():
task, target = get_sample_task()
records = get_sample_records(n=500)
base_model = XGBoostCostModel(task, feature_type="itervar", loss_type="rank")
base_model.fit_log(records, plan_size=32)
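# Transfer setup: reuse the fitted model as the base model of a fresh cost model.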
upper_model = XGBoostCostModel(task, feature_type="itervar", loss_type="rank")
upper_model.load_basemodel(base_model)
xs = np.arange(10)
ys = np.arange(10)
upper_model.fit(xs, ys, plan_size=32)
upper_model.predict(np.ones(12))
upper_model.predict(np.ones(8))
def fit_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_fit()
def test_fit_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=test_fit)
p.start()
p.join()
def test_tuner():
task, target = get_sample_task()
records = get_sample_records(n=10)
tuner = autotvm.tuner.XGBTuner(task)
tuner.load_history(records, min_seed_records=10)
assert tuner.cost_model.base_model is not None
tuner = autotvm.tuner.XGBTuner(task)
tuner.load_history(records, min_seed_records=11)
assert tuner.cost_model.base_model is None
def test_update():
task, target = get_sample_task()
tuner = autotvm.tuner.XGBTuner(task)
n_records = 5
records = get_sample_records(n=n_records)
tuner.update([inp for inp, _ in records], [res for _, res in records])
assert len(tuner.xs) == n_records
assert len(tuner.ys) == n_records
assert len(tuner.visited) == n_records
assert all(x in tuner.visited for x in tuner.xs)
if __name__ == "__main__":
test_fit()
test_fit_spawn()
test_tuner()
test_update()
import pytest
import tvm
import tvm.testing
from tvm.ir.base import get_first_structural_mismatch
from tvm.runtime import ObjectPath
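# Helper: checks that get_first_structural_mismatch reports mirrored paths when
# its two arguments are swapped, then returns the (a_path, b_path) pair.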
def get_first_mismatch_ensure_symmetry(a, b):
mismatch = get_first_structural_mismatch(a, b)
mismatch_swapped = get_first_structural_mismatch(b, a)
if mismatch is None and mismatch_swapped is None:
return None
if (
mismatch is None
or mismatch_swapped is None
or mismatch[0] != mismatch_swapped[1]
or mismatch[1] != mismatch_swapped[0]
):
raise AssertionError(
"get_first_structural_mismatch(a, b) and get_first_structural_mismatch(b, a) returned"
" inconsistent results '{}' and '{}' for a='{}', b='{}'".format(
mismatch, mismatch_swapped, a, b
)
)
a_path, b_path = mismatch
b_path_swapped, a_path_swapped = mismatch_swapped
assert a_path == a_path_swapped
assert b_path == b_path_swapped
return mismatch
@pytest.mark.parametrize(
"a, b, expected_a_path, expected_b_path",
[
(
[1, 2, 3],
[1, 4, 3],
ObjectPath.root().array_index(1).attr("value"),
ObjectPath.root().array_index(1).attr("value"),
),
(
[1, 2, 3],
[10, 2, 30],
ObjectPath.root().array_index(0).attr("value"),
ObjectPath.root().array_index(0).attr("value"),
),
(
[1, 3, 4],
[1, 2, 3, 4],
ObjectPath.root().array_index(1).attr("value"),
ObjectPath.root().array_index(1).attr("value"),
),
(
[1, 2, 3],
[1, 2, 3, 4],
ObjectPath.root().missing_array_element(3),
ObjectPath.root().array_index(3),
),
(
[],
[1],
ObjectPath.root().missing_array_element(0),
ObjectPath.root().array_index(0),
),
],
)
def test_array_structural_mismatch(a, b, expected_a_path, expected_b_path):
a = tvm.runtime.convert(a)
b = tvm.runtime.convert(b)
a_path, b_path = get_first_mismatch_ensure_symmetry(a, b)
assert a_path == expected_a_path
assert b_path == expected_b_path
@pytest.mark.parametrize(
"contents",
[
[],
[1],
[1, 2, 3],
],
)
def test_array_structural_equal_to_self(contents):
a = tvm.runtime.convert(list(contents))
b = tvm.runtime.convert(list(contents))
assert get_first_mismatch_ensure_symmetry(a, b) is None
@pytest.mark.parametrize(
"a, b, expected_a_path, expected_b_path",
[
(
dict(a=3, b=4),
dict(a=3, b=5),
ObjectPath.root().map_value("b").attr("value"),
ObjectPath.root().map_value("b").attr("value"),
),
(
dict(a=3, b=4),
dict(a=3, b=4, c=5),
ObjectPath.root().missing_map_entry(),
ObjectPath.root().map_value("c"),
),
],
)
def test_string_map_structural_mismatch(a, b, expected_a_path, expected_b_path):
a = tvm.runtime.convert(a)
b = tvm.runtime.convert(b)
a_path, b_path = get_first_mismatch_ensure_symmetry(a, b)
assert a_path == expected_a_path
assert b_path == expected_b_path
@pytest.mark.parametrize(
"contents",
[
dict(),
dict(a=1),
dict(a=3, b=4, c=5),
],
)
def test_string_structural_equal_to_self(contents):
a = tvm.runtime.convert(dict(contents))
b = tvm.runtime.convert(dict(contents))
assert get_first_mismatch_ensure_symmetry(a, b) is None
if __name__ == "__main__":
tvm.testing.main()
import numpy as np
import os
import pathlib
import shutil
import pytest
pytest.importorskip("pty")
import tvm
import tvm.relay
import tvm.testing
from tvm.target import Target
from tvm.relay.backend import Runtime
from tvm.relay.backend import Executor
BUILD = True
DEBUG = False
TARGET = tvm.target.target.micro("host")
def _make_sess_from_op(temp_dir, op_name, sched, arg_bufs):
runtime = Runtime("crt", {"system-lib": True})
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
mod = tvm.build(sched, arg_bufs, Target(TARGET, TARGET), runtime=runtime, name=op_name)
return _make_session(temp_dir, mod)
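# Generate a host CRT project from the compiled module, build and flash it, then
# open an RPC session to the device.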
def _make_session(temp_dir, mod):
template_project_dir = os.path.join(tvm.micro.get_standalone_crt_dir(), "template", "host")
project = tvm.micro.generate_project(
template_project_dir, mod, temp_dir / "project", {"verbose": 1}
)
project.build()
project.flash()
return tvm.micro.Session(project.transport())
def _make_add_sess(temp_dir):
A = tvm.te.placeholder((2,), dtype="int8")
B = tvm.te.placeholder((1,), dtype="int8")
C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
sched = tvm.te.create_schedule(C.op)
return _make_sess_from_op(temp_dir, "add", sched, [A, B, C])
def _make_ident_sess(temp_dir):
A = tvm.te.placeholder((2,), dtype="int8")
B = tvm.te.compute(A.shape, lambda i: A[i], name="B")
sched = tvm.te.create_schedule(B.op)
return _make_sess_from_op(temp_dir, "ident", sched, [A, B])
@tvm.testing.requires_micro
def test_compile_runtime():
"""Test compiling the on-device runtime.""" |
import tvm.micro
temp_dir = tvm.contrib.utils.tempdir()
with _make_add_sess(temp_dir) as sess:
A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
assert (A_data.numpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
assert (B_data.numpy() == np.array([4])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
assert (C_data.numpy() == np.array([0, 0])).all()
system_lib = sess.get_system_lib()
system_lib.get_function("add")(A_data, B_data, C_data)
assert (C_data.numpy() == np.array([6, 7])).all()
@tvm.testing.requires_micro
def test_compile_runtime_llvm():
"""Test targeting the on-device runtime with the llvm backend."""
global TARGET
old_target = TARGET
try:
target_str = str(TARGET)
assert target_str.startswith("c ")
TARGET = tvm.target.Target("llvm " + str(TARGET)[len("c ") :])
test_compile_runtime()
finally:
TARGET = old_target
@tvm.testing.requires_micro
def test_reset():
"""Test when the remote end resets during a session.""" |
import tvm.micro
from tvm.micro |
import transport
temp_dir = tvm.contrib.utils.tempdir()
with _make_add_sess(temp_dir) as sess:
try:
sess._rpc.get_function("tvm.testing.reset_server")()
assert False, "expected to raise SessionTerminatedError; did not raise"
except tvm.micro.SessionTerminatedError:
pass
@tvm.testing.requires_micro
def test_graph_executor():
"""Test use of the graph executor with microTVM."""
ws_root = pathlib.Path(os.path.dirname(__file__) + "/micro-workspace")
if ws_root.exists():
shutil.rmtree(ws_root)
temp_dir = tvm.contrib.utils.tempdir(ws_root.resolve())
relay_mod = tvm.parser.fromtext(
"""
def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8]) {
%0 = %a + %b;
%0
}"""
)
runtime = Runtime("crt", {"system-lib": True})
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
factory = tvm.relay.build(relay_mod, target=TARGET, runtime=runtime)
def do_test(graph_mod):
A_data = tvm.nd.array(np.array([2, 3], dtype="uint8"), device=sess.device)
assert (A_data.numpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4, 7], dtype="uint8"), device=sess.device)
assert (B_data.numpy() == np.array([4, 7])).all()
assert graph_mod.get_input_index("a") == 0
assert graph_mod.get_input_index("b") == 1
graph_mod.run(a=A_data, b=B_data)
out = graph_mod.get_output(0)
assert (out.numpy() == np.array([6, 10])).all()
with _make_session(temp_dir, factory) as sess:
graph_mod_local = tvm.micro.create_local_graph_executor(
factory.get_graph_json(), sess.get_system_lib(), sess.device
)
do_test(graph_mod_local)
graph_mod = tvm.contrib.graph_executor.create(
factory.get_graph_json(), sess.get_system_lib(), sess.device
)
do_test(graph_mod)
@tvm.testing.requires_micro
def test_aot_executor():
"""Test use of the AOT executor with microTVM."""
ws_root = pathlib.Path(os.path.dirname(__file__) + "/micro-workspace")
if ws_root.exists():
shutil.rmtree(ws_root)
temp_dir = tvm.contrib.utils.tempdir(ws_root.resolve())
relay_mod = tvm.parser.fromtext(
"""
def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8]) {
%0 = %a + %b;
%0
}"""
)
runtime = Runtime("crt", {"system-lib": True})
executor = Executor("aot")
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
factory = tvm.relay.build(relay_mod, target=TARGET, runtime=runtime, executor=executor)
def do_test():
aot_executor = tvm.runtime.executor.aot_executor.AotModule(
sess._rpc.get_function("tvm.aot_executor.create")(
sess.get_system_lib(), sess.device, "default"
)
)
assert aot_executor.get_input_index("a") == 0
assert aot_executor.get_input_index("b") == 1
assert aot_executor.get_num_inputs() == 2
assert aot_executor.get_num_outputs() == 1
A_np = np.array([[2, 3]], dtype="uint8")
B_np = np.array([[4, 7]], dtype="uint8")
A_data = aot_executor.get_input("a").copyfrom(A_np)
B_data = aot_executor.get_input("b").copyfrom(B_np)
aot_executor.run()
out = aot_executor.get_output(0)
assert (out.numpy() == np.array([6, 10])).all()
B_np_new = np.array([[5, 8]])
aot_executor.set_input("b", B_np_new)
assert (B_data.numpy() == B_np_new).all()
with _make_session(temp_dir, factory) as sess:
do_test()
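# Parametrization: with USMP enabled the constant pool is handed to the AOT
# executor, which is currently unsupported and expected to raise.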
enable_usmp, expect_exception = tvm.testing.parameters((True, True), (False, False))
@tvm.testing.requires_micro
def test_aot_executor_usmp_const_pool(enable_usmp, expect_exception):
"""Test the AOT executor with microTVM using usmp.
Test should fail if const pool is supplied to executor
as these are currently not supported
""" |
ws_root = pathlib.Path(os.path.dirname(__file__) + "/micro-workspace-usmp")
if ws_root.exists():
shutil.rmtree(ws_root)
temp_dir = tvm.contrib.utils.tempdir(ws_root.resolve())
relay_mod = tvm.parser.fromtext(
"""
def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8], %c : Tensor[(1,2), uint8]) {
%0 = %a + %b;
%1 = %0 + %c;
%1
}"""
)
runtime = Runtime("crt", {"system-lib": True})
executor = Executor("aot")
main_func = relay_mod["main"]
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
B_np = np.array([[4, 7]], dtype="uint8").astype(type_dict["b"])
C_np = np.array([[8, 9]], dtype="uint8").astype(type_dict["c"])
params = {"c": C_np}
with tvm.transform.PassContext(
opt_level=3, config={"tir.disable_vectorize": True, "tir.usmp.enable": enable_usmp}
):
factory = tvm.relay.build(
relay_mod,
target=TARGET,
runtime=runtime,
executor=executor,
params=params,
)
def do_test():
try:
aot_executor = tvm.runtime.executor.aot_executor.AotModule(
sess._rpc.get_function("tvm.aot_executor.create")(
sess.get_system_lib(), sess.device, "default"
)
)
except tvm._ffi.base.TVMError as e:
if expect_exception:
return
else:
raise e
assert aot_executor.get_input_index("a") == 0
assert aot_executor.get_input_index("b") == 1
assert aot_executor.get_num_inputs() == 2
assert aot_executor.get_num_outputs() == 1
A_np = np.array([[2, 3]], dtype="uint8")
B_np = np.array([[4, 7]], dtype="uint8")
A_data = aot_executor.get_input("a").copyfrom(A_np)
B_data = aot_executor.get_input("b").copyfrom(B_np)
aot_executor.run()
out = aot_executor.get_output(0)
assert (out.numpy() == np.array([14, 19])).all()
B_np_new = np.array([[5, 8]])
aot_executor.set_input("b", B_np_new)
assert (B_data.numpy() == B_np_new).all()
with _make_session(temp_dir, factory) as sess:
do_test()
@tvm.testing.requires_micro
def test_std_math_functions():
"""Verify that standard math functions can be used.""" |
import tvm.micro
temp_dir = tvm.contrib.utils.tempdir()
with _make_add_sess(temp_dir) as sess:
A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
assert (A_data.numpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
assert (B_data.numpy() == np.array([4])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
assert (C_data.numpy() == np.array([0, 0])).all()
system_lib = sess.get_system_lib()
system_lib.get_function("add")(A_data, B_data, C_data)
temp_dir = tvm.contrib.utils.tempdir()
A = tvm.te.placeholder((2,), dtype="float32", name="A")
B = tvm.te.compute(A.shape, lambda i: tvm.te.exp(A[i]), name="B")
s = tvm.te.create_schedule(B.op)
with _make_sess_from_op(temp_dir, "myexpf", s, [A, B]) as sess:
A_data = tvm.nd.array(np.array([2.0, 3.0], dtype="float32"), device=sess.device)
B_data = tvm.nd.array(np.array([2.0, 3.0], dtype="float32"), device=sess.device)
lib = sess.get_system_lib()
func = lib["myexpf"]
func(A_data, B_data)
np.testing.assert_allclose(B_data.numpy(), np.array([7.389056, 20.085537]))
@tvm.testing.requires_micro
def test_platform_timer():
"""Verify the platform timer can be used to time remote functions.""" |
import tvm.micro
temp_dir = tvm.contrib.utils.tempdir()
A = tvm.te.placeholder((2,), dtype="float32", name="A")
B = tvm.te.compute(A.shape, lambda i: tvm.te.exp(A[i]), name="B")
s = tvm.te.create_schedule(B.op)
with _make_sess_from_op(temp_dir, "myexpf", s, [A, B]) as sess:
A_data = tvm.nd.array(np.array([2.0, 3.0], dtype="float32"), device=sess.device)
B_data = tvm.nd.array(np.array([2.0, 3.0], dtype="float32"), device=sess.device)
lib = sess.get_system_lib()
time_eval_f = lib.time_evaluator(
"myexpf", sess.device, number=2000, repeat=3, min_repeat_ms=40
)
result = time_eval_f(A_data, B_data)
assert result.mean > 0
assert len(result.results) == 3
@tvm.testing.requires_micro
def test_autotune():
"""Verify that autotune works with micro.""" |
import tvm.relay as relay
from tvm.micro.testing.utils |
import check_tune_log
runtime = Runtime("crt", {"system-lib": True})
data = relay.var("data", relay.TensorType((1, 3, 64, 64), "float32"))
weight = relay.var("weight", relay.TensorType((8, 3, 5, 5), "float32"))
y = relay.nn.conv2d(
data,
weight,
padding=(2, 2),
kernel_size=(5, 5),
kernel_layout="OIHW",
out_dtype="float32",
)
f = relay.Function([data, weight], y)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.InferType()(mod)
main_func = mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
weight_data = np.ones(shape_dict["weight"]).astype(type_dict["weight"])
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params = {"weight": weight_data}
inputs = {"data": input_data}
target = tvm.target.target.micro("host")
template_project_dir = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt"))
pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True})
with pass_context:
tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
assert len(tasks) > 0
module_loader = tvm.micro.AutoTvmModuleLoader(
template_project_dir=template_project_dir,
project_options={},
)
builder = tvm.autotvm.LocalBuilder(
n_parallel=1,
build_kwargs={"build_option": {"tir.disable_vectorize": True}},
do_fork=True,
build_func=tvm.micro.autotvm_build_func,
runtime=runtime,
)
runner = tvm.autotvm.LocalRunner(number=1, repeat=1, module_loader=module_loader)
measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
tune_log_file = pathlib.Path("crt_autotune.log")
if tune_log_file.exists():
tune_log_file.unlink()
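# Tune every extracted task with a GA tuner, logging results to crt_autotune.log.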
num_trials = 10
for task in tasks:
tuner = tvm.autotvm.tuner.GATuner(task)
tuner.tune(
n_trial=num_trials,
measure_option=measure_option,
callbacks=[
tvm.autotvm.callback.log_to_file(str(tune_log_file)),
tvm.autotvm.callback.progress_bar(num_trials, si_prefix="M"),
],
si_prefix="M",
)
assert tuner.best_flops > 0
with pass_context:
lowered = tvm.relay.build(mod, target=TARGET, runtime=runtime, params=params)
temp_dir = tvm.contrib.utils.tempdir()
project = tvm.micro.generate_project(template_project_dir, lowered, temp_dir / "project")
project.build()
with tvm.micro.Session(project.transport()) as session:
graph_mod = tvm.micro.create_local_graph_executor(
lowered.get_graph_json(), session.get_system_lib(), session.device
)
graph_mod.set_input(**lowered.get_params())
graph_mod.run(**inputs)
expected_output = graph_mod.get_output(0).numpy()
del graph_mod
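# Rebuild with the tuned records applied and check the output matches the
# untuned baseline.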
with tvm.autotvm.apply_history_best(str(tune_log_file)):
with pass_context:
lowered_tuned = tvm.relay.build(mod, target=target, runtime=runtime, params=params)
temp_dir = tvm.contrib.utils.tempdir()
project = tvm.micro.generate_project(template_project_dir, lowered_tuned, temp_dir / "project")
project.build()
with tvm.micro.Session(project.transport()) as session:
graph_mod = tvm.micro.create_local_graph_executor(
lowered_tuned.get_graph_json(), session.get_system_lib(), session.device
)
graph_mod.set_input(**lowered_tuned.get_params())
graph_mod.run(**inputs)
output = graph_mod.get_output(0).numpy()
del graph_mod
tvm.testing.assert_allclose(output, expected_output, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
"""Unit tests for the Bring Your Own Datatype framework.
TODO(@gussmith23 @hypercubestart) link to documentation"""
import numpy as np
import pytest
import tvm
import tvm.topi.testing
import tvm.testing
from tvm import relay
from tvm.relay.testing.layers import batch_norm_infer
from tvm.target.datatype